/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/system.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"

#ifndef MEM_SIZE
#define MEM_SIZE        (16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
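/*
 * ENDIANNESS evaluates to 'l' or 'b' at run time: casting the long to char
 * yields its least significant byte, which is the 'l' element of the union
 * on a little-endian CPU and the 'b' element on a big-endian one.  The
 * character is appended to the machine and ELF platform names below
 * (giving, for example, "armv7l" on a little-endian ARMv7 system).
 */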

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel text",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "?(11)",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

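/*
 * Work out the architecture version from the main ID register (MIDR).
 * Very old cores without a CPUID are reported as unknown; ARM7-style IDs
 * use bit 23 to tell ARMv4T from ARMv3; later IDs encode the architecture
 * in bits [19:16], where the value 0xf means "consult the feature
 * registers", in which case the VMSA/PMSA fields of MMFR0 distinguish
 * ARMv6 from ARMv7.
 */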
static int __get_cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}

int __pure cpu_architecture(void)
{
        BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

        return __cpu_architecture;
}

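/*
 * A VIPT I-cache can alias when the size of one way (line size times the
 * number of sets) exceeds PAGE_SIZE, since the cache index then uses
 * address bits above the page offset.  On ARMv7 this is computed from
 * CCSIDR for the L1 instruction cache (selected via CSSELR = 1); on ARMv6
 * the cache type register carries a dedicated aliasing bit for the I-cache
 * instead.
 */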
static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* PIPT caches never alias. */
        if (icache_is_pipt())
                return 0;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}

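/*
 * Classify the caches for the cacheid flags used by the mm code.  A cache
 * type register with bits [31:29] = 0b100 uses the ARMv7 layout, where the
 * L1Ip field (bits [15:14]) distinguishes ASID-tagged VIVT, VIPT and PIPT
 * instruction caches; the older ARMv6 layout instead carries a D-cache
 * aliasing bit (bit 23).  Anything before ARMv6 is treated as VIVT.
 */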
static void __init cacheid_init(void)
{
        unsigned int cachetype = read_cpuid_cachetype();
        unsigned int arch = cpu_architecture();

        if (arch >= CPU_ARCH_ARMv6) {
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        switch (cachetype & (3 << 14)) {
                        case (1 << 14):
                                cacheid |= CACHEID_ASID_TAGGED;
                                break;
                        case (3 << 14):
                                cacheid |= CACHEID_PIPT;
                                break;
                        }
                } else {
                        arch = CPU_ARCH_ARMv6;
                        if (cachetype & (1 << 23))
                                cacheid = CACHEID_VIPT_ALIASING;
                        else
                                cacheid = CACHEID_VIPT_NONALIASING;
                }
                if (cpu_has_aliasing_icache(arch))
                        cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                icache_is_pipt() ? "PIPT" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

static void __init feat_v6_fixup(void)
{
        int id = read_cpuid_id();

        if ((id & 0xff0f0000) != 0x41070000)
                return;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
                elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        cpu_proc_init();

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * setup stacks for re-entrant exception handlers
         */
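        /*
         * The sequence below switches into IRQ, ABT and UND mode in turn
         * (with IRQs and FIQs masked), points each mode's banked SP at its
         * three-word slot in stacks[cpu], and finally drops back to SVC
         * mode.  These tiny stacks are only expected to hold the few words
         * the exception vector stubs stash away before switching to the
         * SVC stack proper (see the vector stubs in entry-armv.S).
         */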
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
}

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
                 list->arch_name, ENDIANNESS);
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~HWCAP_THUMB;
#endif

        feat_v6_fixup();

        cacheid_init();
        cpu_init();
}

void __init dump_machine_table(void)
{
        struct machine_desc *p;

        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(phys_addr_t start, unsigned long size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at 0x%08llx\n", (long long)start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        bank->start = PAGE_ALIGN(start);
        bank->size  = size & PAGE_MASK;

        /*
         * Check whether this memory region has non-zero size or
         * invalid node number.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
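/*
 * For example "mem=64M" adds one 64MB bank at PHYS_OFFSET, while
 * "mem=64M@0x20000000" places it at the given physical address.  Each
 * "mem=" option adds a bank; the first one discards whatever banks were
 * already set up from the boot tags or device tree.
 */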
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        unsigned long size;
        phys_addr_t start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);

static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
        extern int rd_size, rd_image_start, rd_prompt, rd_doload;

        rd_image_start = image_start;
        rd_prompt = prompt;
        rd_doload = doload;

        if (rd_sz)
                rd_size = rd_sz;
#endif
}

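/*
 * Register the standard /proc/iomem and /proc/ioports resources: one
 * "System RAM" entry per memblock region, with the kernel text and data
 * nested inside the region that contains them, plus the optional video
 * RAM and legacy parallel-port ranges requested by the machine record.
 */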
static void __init request_standard_resources(struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines don't have the possibility of ever
         * possessing lp0, lp1 or lp2
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

/*
 *  Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be an ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
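/*
 * A minimal list from a boot loader might therefore look like (each entry
 * starts with a header giving its size in 32-bit words and its tag value):
 *
 *      ATAG_CORE    { flags, pagesize, rootdev }
 *      ATAG_MEM     { size, start }            -- one per memory bank
 *      ATAG_CMDLINE "console=ttyS0 ..."        -- illustrative string
 *      ATAG_NONE    (size 0, terminates the list)
 */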
static int __init parse_tag_core(const struct tag *tag)
{
        if (tag->hdr.size > 2) {
                if ((tag->u.core.flags & 1) == 0)
                        root_mountflags &= ~MS_RDONLY;
                ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
        }
        return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

static int __init parse_tag_mem32(const struct tag *tag)
{
        return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines      = 30,
 .orig_video_cols       = 80,
 .orig_video_mode       = 0,
 .orig_video_ega_bx     = 0,
 .orig_video_isVGA      = 1,
 .orig_video_points     = 8
};

static int __init parse_tag_videotext(const struct tag *tag)
{
        screen_info.orig_x            = tag->u.videotext.x;
        screen_info.orig_y            = tag->u.videotext.y;
        screen_info.orig_video_page   = tag->u.videotext.video_page;
        screen_info.orig_video_mode   = tag->u.videotext.video_mode;
        screen_info.orig_video_cols   = tag->u.videotext.video_cols;
        screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
        screen_info.orig_video_lines  = tag->u.videotext.video_lines;
        screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
        screen_info.orig_video_points = tag->u.videotext.video_points;
        return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

static int __init parse_tag_ramdisk(const struct tag *tag)
{
        setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
                      (tag->u.ramdisk.flags & 2) == 0,
                      tag->u.ramdisk.start, tag->u.ramdisk.size);
        return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
        system_serial_low = tag->u.serialnr.low;
        system_serial_high = tag->u.serialnr.high;
        return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
        system_rev = tag->u.revision.rev;
        return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

static int __init parse_tag_cmdline(const struct tag *tag)
{
#if defined(CONFIG_CMDLINE_EXTEND)
        strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
        strlcat(default_command_line, tag->u.cmdline.cmdline,
                COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
        pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#else
        strlcpy(default_command_line, tag->u.cmdline.cmdline,
                COMMAND_LINE_SIZE);
#endif
        return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
        extern struct tagtable __tagtable_begin, __tagtable_end;
        struct tagtable *t;

        for (t = &__tagtable_begin; t < &__tagtable_end; t++)
                if (tag->hdr.tag == t->tag) {
                        t->parse(tag);
                        break;
                }

        return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
        for (; t->hdr.size; t = tag_next(t))
                if (!parse_tag(t))
                        printk(KERN_WARNING
                                "Ignoring unrecognised tag 0x%08x\n",
                                t->hdr.tag);
}

/*
 * This holds our defaults.
 */
static struct init_tags {
        struct tag_header hdr1;
        struct tag_core   core;
        struct tag_header hdr2;
        struct tag_mem32  mem;
        struct tag_header hdr3;
} init_tags __initdata = {
        { tag_size(tag_core), ATAG_CORE },
        { 1, PAGE_SIZE, 0xff },
        { tag_size(tag_mem32), ATAG_MEM },
        { MEM_SIZE },
        { 0, ATAG_NONE }
};

static int __init customize_machine(void)
{
        /* customizes platform devices, or adds new ones */
        if (machine_desc->init_machine)
                machine_desc->init_machine();
        return 0;
}
arch_initcall(customize_machine);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
                printk(KERN_WARNING "crashkernel reservation failed - "
                       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
                return;
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

static void __init squash_mem_tags(struct tag *tag)
{
        for (; tag->hdr.size; tag = tag_next(tag))
                if (tag->hdr.tag == ATAG_MEM)
                        tag->hdr.tag = ATAG_NONE;
}

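/*
 * Locate the machine record for the machine ID handed over in r1 by the
 * boot loader and parse the tag list.  The tags are taken from the
 * physical address passed in r2 (__atags_pointer) if one was supplied,
 * otherwise from the machine's legacy atag_offset; if neither yields a
 * valid ATAG_CORE list we fall back to the built-in init_tags defaults
 * (MEM_SIZE of RAM at PHYS_OFFSET).
 */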
static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
        struct tag *tags = (struct tag *)&init_tags;
        struct machine_desc *mdesc = NULL, *p;
        char *from = default_command_line;

        init_tags.mem.start = PHYS_OFFSET;

        /*
         * locate machine in the list of supported machines.
         */
        for_each_machine_desc(p)
                if (nr == p->nr) {
                        printk("Machine: %s\n", p->name);
                        mdesc = p;
                        break;
                }

        if (!mdesc) {
                early_print("\nError: unrecognized/unsupported machine ID"
                        " (r1 = 0x%08x).\n\n", nr);
                dump_machine_table(); /* does not return */
        }

        if (__atags_pointer)
                tags = phys_to_virt(__atags_pointer);
        else if (mdesc->atag_offset)
                tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
        /*
         * If we have the old style parameters, convert them to
         * a tag list.
         */
        if (tags->hdr.tag != ATAG_CORE)
                convert_to_tag_list(tags);
#endif

        if (tags->hdr.tag != ATAG_CORE) {
#if defined(CONFIG_OF)
                /*
                 * If CONFIG_OF is set, then assume this is a reasonably
                 * modern system that should pass boot parameters
                 */
                early_print("Warning: Neither atags nor dtb found\n");
#endif
                tags = (struct tag *)&init_tags;
        }

        if (mdesc->fixup)
                mdesc->fixup(tags, &from, &meminfo);

        if (tags->hdr.tag == ATAG_CORE) {
                if (meminfo.nr_banks != 0)
                        squash_mem_tags(tags);
                save_atags(tags);
                parse_tags(tags);
        }

        /* parse_early_param needs a boot_command_line */
        strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

        return mdesc;
}


void __init setup_arch(char **cmdline_p)
{
        struct machine_desc *mdesc;

        unwind_init();

        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
                mdesc = setup_machine_tags(machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;

        if (mdesc->soft_reboot)
                reboot_setup("s");

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

        unflatten_device_tree();

#ifdef CONFIG_SMP
        if (is_smp())
                smp_init_cpus();
#endif
        reserve_crashkernel();

        tcm_init();

#ifdef CONFIG_ZONE_DMA
        if (mdesc->dma_zone_size) {
                extern unsigned long arm_dma_zone_size;
                arm_dma_zone_size = mdesc->dma_zone_size;
        }
#endif
#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
        early_trap_init();

        if (mdesc->init_early)
                mdesc->init_early();
}


static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

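/*
 * Strings for the /proc/cpuinfo "Features" line.  c_show() below indexes
 * this table by bit number, so the order must stay in step with the
 * HWCAP_* bit assignments in <asm/hwcap.h>.
 */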
static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        NULL
};

static int c_show(struct seq_file *m, void *v)
{
        int i;

        seq_printf(m, "Processor\t: %s rev %d (%s)\n",
                   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
        }
#else /* CONFIG_SMP */
        seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                   loops_per_jiffy / (500000/HZ),
                   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

        /* dump out the processor features */
        seq_puts(m, "Features\t: ");

        for (i = 0; hwcap_str[i]; i++)
                if (elf_hwcap & (1 << i))
                        seq_printf(m, "%s ", hwcap_str[i]);

        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
        seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

        if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
                /* pre-ARM7 */
                seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
        } else {
                if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                        /* ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%02x\n",
                                   (read_cpuid_id() >> 16) & 127);
                } else {
                        /* post-ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%x\n",
                                   (read_cpuid_id() >> 20) & 15);
                }
                seq_printf(m, "CPU part\t: 0x%03x\n",
                           (read_cpuid_id() >> 4) & 0xfff);
        }
        seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

        seq_puts(m, "\n");

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};