Merge branch 'integration' into for-linus
[pandora-kernel.git] / arch / arm / kernel / setup.c
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/of_fdt.h>
24 #include <linux/crash_dump.h>
25 #include <linux/root_dev.h>
26 #include <linux/cpu.h>
27 #include <linux/interrupt.h>
28 #include <linux/smp.h>
29 #include <linux/fs.h>
30 #include <linux/proc_fs.h>
31 #include <linux/memblock.h>
32
33 #include <asm/unified.h>
34 #include <asm/cpu.h>
35 #include <asm/cputype.h>
36 #include <asm/elf.h>
37 #include <asm/procinfo.h>
38 #include <asm/sections.h>
39 #include <asm/setup.h>
40 #include <asm/smp_plat.h>
41 #include <asm/mach-types.h>
42 #include <asm/cacheflush.h>
43 #include <asm/cachetype.h>
44 #include <asm/tlbflush.h>
45
46 #include <asm/prom.h>
47 #include <asm/mach/arch.h>
48 #include <asm/mach/irq.h>
49 #include <asm/mach/time.h>
50 #include <asm/traps.h>
51 #include <asm/unwind.h>
52
53 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
54 #include "compat.h"
55 #endif
56 #include "atags.h"
57 #include "tcm.h"
58
59 #ifndef MEM_SIZE
60 #define MEM_SIZE        (16*1024*1024)
61 #endif
62
63 #if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
64 char fpe_type[8];
65
66 static int __init fpe_setup(char *line)
67 {
68         memcpy(fpe_type, line, 8);
69         return 1;
70 }
71
72 __setup("fpe=", fpe_setup);
73 #endif
74
extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);

/* CPU ID (MIDR) of the boot CPU, read very early in head.S. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine number passed by the bootloader in r1. */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* CACHEID_* flags describing the cache model, set by cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the ATAG list (r2 from the bootloader). */
unsigned int __atags_pointer __initdata;

/* Board revision and serial number: filled in from ATAGs, reported in
 * /proc/cpuinfo by c_show(). */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* HWCAP_* capability bits advertised to userspace (ELF aux vector,
 * "Features" line in /proc/cpuinfo). */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * Per-class method tables.  When the kernel is built to support more
 * than one CPU/TLB/user/cache implementation, setup_processor() copies
 * the matching proc_info_list entry into these.
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/* Minimal per-CPU stacks for the IRQ/abort/undefined exception modes;
 * set up by cpu_init(). */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* ELF platform string ("v7l" etc.) derived from the proc_info entry. */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
/* Copy of the command line handed to init (see setup_arch()). */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/* Run-time endianness probe: reading the first byte of .l yields 'l' on
 * little-endian and 'b' on big-endian builds. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
140 /*
141  * Standard memory resources
142  */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

/* Convenience names for the entries above; the start/end addresses are
 * filled in by request_standard_resources(). */
#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy PC-style parallel port I/O ranges, reserved only when the
 * machine descriptor sets reserve_lp0/1/2. */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
192
/* Human-readable architecture names, indexed by the CPU_ARCH_* value
 * returned from cpu_architecture(). */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
212
/*
 * Decode the CPU architecture version (CPU_ARCH_*) from the main ID
 * register.  Three CPUID encodings are handled: the pre-ARM7 format,
 * the ARM7 format, and the post-ARM7 format including the "revised"
 * (0xF architecture field) scheme used from ARMv6 onwards.
 */
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		/* ARM7 family: bit 23 distinguishes ARMv4T from ARMv3 */
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		/* Post-ARM7: architecture encoded directly in bits [18:16] */
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
245
/*
 * Determine whether the instruction cache can alias: i.e. whether one
 * I-cache way is larger than a page, so the same physical line may live
 * at more than one index.  @arch selects which cache ID register format
 * to use; returns non-zero if the I-cache aliases.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 instruction cache in CSSELR, then read
		 * its geometry from CCSIDR. */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		/* Aliasing is possible when one way spans more than a page */
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* ARMv6 cache type register: bit 11 flags an aliasing I-cache */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
274
/*
 * Work out the cache model of the boot CPU from the cache type register
 * and set the global 'cacheid' flags accordingly, then report it.
 * Pre-ARMv6 CPUs are assumed VIVT.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				/* L1Ip == 0b01: ASID-tagged VIVT I-cache */
				cacheid |= CACHEID_ASID_TAGGED;
			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		} else if (cachetype & (1 << 23)) {
			/* ARMv6 format, S bit set: aliasing VIPT */
			cacheid = CACHEID_VIPT_ALIASING;
		} else {
			cacheid = CACHEID_VIPT_NONALIASING;
			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		}
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
308
309 /*
310  * These functions re-use the assembly code in head.S, which
311  * already provide the required functionality.
312  */
313 extern struct proc_info_list *lookup_processor_type(unsigned int);
314
/*
 * printf-style output usable very early in boot: the formatted message
 * (truncated to 256 bytes) goes to the low-level debug channel when
 * CONFIG_DEBUG_LL is enabled, and to the normal printk log in any case.
 */
void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}
330
331 static void __init feat_v6_fixup(void)
332 {
333         int id = read_cpuid_id();
334
335         if ((id & 0xff0f0000) != 0x41070000)
336                 return;
337
338         /*
339          * HWCAP_TLS is available only on 1136 r1p0 and later,
340          * see also kuser_get_tls_init.
341          */
342         if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
343                 elf_hwcap &= ~HWCAP_TLS;
344 }
345
/*
 * Identify the boot CPU, hook up its per-class method tables, derive
 * the uname machine / ELF platform strings and hardware capabilities,
 * then initialise the cache model and the processor itself.  Hangs
 * forever if the CPU is not in the compiled-in processor table.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

	/* Copy the method tables for multi-implementation kernels */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* ENDIANNESS appends 'l' or 'b' depending on the running byte order */
	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}
393
394 /*
395  * cpu_init - initialise one CPU.
396  *
397  * cpu_init sets up the per-CPU stacks.
398  */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * For each of IRQ, abort and undefined-instruction mode in turn:
	 * switch into the mode (interrupts masked), point sp at the top
	 * of that mode's small per-CPU stack, then finally return to SVC
	 * mode.  r14 is used as scratch and declared clobbered.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
444
/*
 * Print every machine descriptor compiled into this kernel and halt.
 * Called when the bootloader's machine ID doesn't match anything we
 * support; does not return.
 */
void __init dump_machine_table(void)
{
	struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}
458
459 int __init arm_add_memory(phys_addr_t start, unsigned long size)
460 {
461         struct membank *bank = &meminfo.bank[meminfo.nr_banks];
462
463         if (meminfo.nr_banks >= NR_BANKS) {
464                 printk(KERN_CRIT "NR_BANKS too low, "
465                         "ignoring memory at 0x%08llx\n", (long long)start);
466                 return -EINVAL;
467         }
468
469         /*
470          * Ensure that start/size are aligned to a page boundary.
471          * Size is appropriately rounded down, start is rounded up.
472          */
473         size -= start & ~PAGE_MASK;
474         bank->start = PAGE_ALIGN(start);
475         bank->size  = size & PAGE_MASK;
476
477         /*
478          * Check whether this memory region has non-zero size or
479          * invalid node number.
480          */
481         if (bank->size == 0)
482                 return -EINVAL;
483
484         meminfo.nr_banks++;
485         return 0;
486 }
487
488 /*
489  * Pick out the memory size.  We look for mem=size@start,
490  * where start and size are "size[KkMm]"
491  */
/*
 * Handle the "mem=size[@start]" early parameter.  The first occurrence
 * discards all bootloader-provided memory banks; each occurrence then
 * adds one bank.  When @start is omitted, PHYS_OFFSET is assumed.
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size;
	phys_addr_t start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
519
520 static void __init
521 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
522 {
523 #ifdef CONFIG_BLK_DEV_RAM
524         extern int rd_size, rd_image_start, rd_prompt, rd_doload;
525
526         rd_image_start = image_start;
527         rd_prompt = prompt;
528         rd_doload = doload;
529
530         if (rd_sz)
531                 rd_size = rd_sz;
532 #endif
533 }
534
/*
 * Register the standard /proc/iomem and /proc/ioport resources: one
 * "System RAM" region per memblock, with the kernel text/data nested
 * inside the region that contains them, plus optional video RAM and
 * legacy parallel-port ranges requested by the machine descriptor.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest the kernel image inside the RAM region holding it */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
579
580 /*
581  *  Tag parsing.
582  *
583  * This is the new way of passing data to the kernel at boot time.  Rather
584  * than passing a fixed inflexible structure to the kernel, we pass a list
585  * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
586  * tag for the list to be recognised (to distinguish the tagged list from
587  * a param_struct).  The list is terminated with a zero-length tag (this tag
588  * is not parsed in any way).
589  */
590 static int __init parse_tag_core(const struct tag *tag)
591 {
592         if (tag->hdr.size > 2) {
593                 if ((tag->u.core.flags & 1) == 0)
594                         root_mountflags &= ~MS_RDONLY;
595                 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
596         }
597         return 0;
598 }
599
600 __tagtable(ATAG_CORE, parse_tag_core);
601
/* ATAG_MEM handler: each tag describes one bank of physical RAM. */
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);
608
609 #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default text-console geometry; overridden by ATAG_VIDEOTEXT below. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* ATAG_VIDEOTEXT handler: copy the bootloader's console description
 * into the global screen_info. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}
632
633 __tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
634 #endif
635
636 static int __init parse_tag_ramdisk(const struct tag *tag)
637 {
638         setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
639                       (tag->u.ramdisk.flags & 2) == 0,
640                       tag->u.ramdisk.start, tag->u.ramdisk.size);
641         return 0;
642 }
643
644 __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
645
/* ATAG_SERIAL handler: record the 64-bit board serial number. */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

/* ATAG_REVISION handler: record the board revision number. */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
662
/*
 * ATAG_CMDLINE handler.  Depending on configuration the bootloader's
 * command line is appended to, ignored in favour of, or replaces the
 * built-in CONFIG_CMDLINE default.
 */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
	strlcat(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#else
	strlcpy(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#endif
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
679
680 /*
681  * Scan the tag table for this tag, and call its parse function.
682  * The tag table is built by the linker from all the __tagtable
683  * declarations.
684  */
/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.  Returns non-zero if a handler was found.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	/* t stopped short of the end iff a matching entry was found */
	return t < &__tagtable_end;
}
698
699 /*
700  * Parse all tags in the list, checking both the global and architecture
701  * specific tag tables.
702  */
/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.  The list ends at the first zero-size header.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}
711
712 /*
713  * This holds our defaults.
714  */
/*
 * This holds our defaults: a fallback ATAG list (core tag, a single
 * MEM_SIZE memory bank whose start is filled in at run time, and the
 * terminating ATAG_NONE) used when the bootloader provides nothing
 * usable.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE },
	{ 0, ATAG_NONE }
};
728
/*
 * Run the machine descriptor's init_machine() hook (if any) at
 * arch_initcall time, once core kernel services are up.
 */
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);
737
738 #ifdef CONFIG_KEXEC
739 static inline unsigned long long get_total_mem(void)
740 {
741         unsigned long total;
742
743         total = max_low_pfn - min_low_pfn;
744         return total << PAGE_SHIFT;
745 }
746
747 /**
748  * reserve_crashkernel() - reserves memory are for crash kernel
749  *
750  * This function reserves memory area given in "crashkernel=" kernel command
751  * line parameter. The memory reserved is used by a dump capture kernel when
752  * primary kernel is crashing.
753  */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	/* Parse "crashkernel=size@base" from the command line */
	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	/* Claim the region exclusively so nothing else allocates in it */
	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* Publish the reservation in /proc/iomem as "Crash kernel" */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
783 #else
784 static inline void reserve_crashkernel(void) {}
785 #endif /* CONFIG_KEXEC */
786
787 static void __init squash_mem_tags(struct tag *tag)
788 {
789         for (; tag->hdr.size; tag = tag_next(tag))
790                 if (tag->hdr.tag == ATAG_MEM)
791                         tag->hdr.tag = ATAG_NONE;
792 }
793
/*
 * ATAG-based machine setup: find the machine descriptor matching the
 * bootloader-supplied machine number @nr, locate a usable tag list
 * (bootloader-provided, machine default, or our built-in fallback),
 * run the machine fixup hook, parse the tags and capture the command
 * line.  Does not return if the machine number is unknown.
 */
static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc = NULL, *p;
	char *from = default_command_line;

	/* Complete the fallback tag list with the real RAM base */
	init_tags.mem.start = PHYS_OFFSET;

	/*
	 * locate machine in the list of supported machines.
	 */
	for_each_machine_desc(p)
		if (nr == p->nr) {
			printk("Machine: %s\n", p->name);
			mdesc = p;
			break;
		}

	if (!mdesc) {
		early_print("\nError: unrecognized/unsupported machine ID"
			" (r1 = 0x%08x).\n\n", nr);
		dump_machine_table(); /* does not return */
	}

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params) {
#ifdef CONFIG_MMU
		/*
		 * We still are executing with a minimal MMU mapping created
		 * with the presumption that the machine default for this
		 * is located in the first MB of RAM.  Anything else will
		 * fault and silently hang the kernel at this point.
		 */
		if (mdesc->boot_params < PHYS_OFFSET ||
		    mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
			printk(KERN_WARNING
			       "Default boot params at physical 0x%08lx out of reach\n",
			       mdesc->boot_params);
		} else
#endif
		{
			tags = phys_to_virt(mdesc->boot_params);
		}
	}

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif

	if (tags->hdr.tag != ATAG_CORE) {
#if defined(CONFIG_OF)
		/*
		 * If CONFIG_OF is set, then assume this is a reasonably
		 * modern system that should pass boot parameters
		 */
		early_print("Warning: Neither atags nor dtb found\n");
#endif
		/* Nothing usable from the bootloader: use the fallback list */
		tags = (struct tag *)&init_tags;
	}

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* If the fixup already filled meminfo, drop the MEM tags */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	return mdesc;
}
875
876
/*
 * Architecture-specific boot-time setup.  Order matters throughout:
 * the processor must be identified before the machine, memory must be
 * sanity-checked before memblock/paging init, and paging must be up
 * before resources and the device tree are processed.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	unwind_init();

	setup_processor();
	/* Prefer a flattened device tree; fall back to ATAGs */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	unflatten_device_tree();

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();
	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();

	if (mdesc->init_early)
		mdesc->init_early();
}
937
938
939 static int __init topology_init(void)
940 {
941         int cpu;
942
943         for_each_possible_cpu(cpu) {
944                 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
945                 cpuinfo->cpu.hotpluggable = 1;
946                 register_cpu(&cpuinfo->cpu, cpu);
947         }
948
949         return 0;
950 }
951 subsys_initcall(topology_init);
952
953 #ifdef CONFIG_HAVE_PROC_CPU
954 static int __init proc_cpu_init(void)
955 {
956         struct proc_dir_entry *res;
957
958         res = proc_mkdir("cpu", NULL);
959         if (!res)
960                 return -ENOMEM;
961         return 0;
962 }
963 fs_initcall(proc_cpu_init);
964 #endif
965
/* Names for the HWCAP_* bits, in bit order, used to print the
 * "Features" line of /proc/cpuinfo.  NULL-terminated. */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};
984
/*
 * seq_file show callback for /proc/cpuinfo: processor name, per-CPU
 * BogoMIPS, feature flags, decoded CPUID fields and board information.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* The CPUID layout differs across pre-ARM7/ARM7/post-ARM7 parts */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
1047
1048 static void *c_start(struct seq_file *m, loff_t *pos)
1049 {
1050         return *pos < 1 ? (void *)1 : NULL;
1051 }
1052
1053 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1054 {
1055         ++*pos;
1056         return NULL;
1057 }
1058
/* seq_file stop callback: nothing to clean up for /proc/cpuinfo. */
static void c_stop(struct seq_file *m, void *v)
{
}

/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};