hfsplus: ensure bio requests are not smaller than the hardware sectors
[pandora-kernel.git] / arch / arm / kernel / setup.c
1 /*
2  *  linux/arch/arm/kernel/setup.c
3  *
4  *  Copyright (C) 1995-2001 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/kernel.h>
12 #include <linux/stddef.h>
13 #include <linux/ioport.h>
14 #include <linux/delay.h>
15 #include <linux/utsname.h>
16 #include <linux/initrd.h>
17 #include <linux/console.h>
18 #include <linux/bootmem.h>
19 #include <linux/seq_file.h>
20 #include <linux/screen_info.h>
21 #include <linux/init.h>
22 #include <linux/kexec.h>
23 #include <linux/of_fdt.h>
24 #include <linux/crash_dump.h>
25 #include <linux/root_dev.h>
26 #include <linux/cpu.h>
27 #include <linux/interrupt.h>
28 #include <linux/smp.h>
29 #include <linux/fs.h>
30 #include <linux/proc_fs.h>
31 #include <linux/memblock.h>
32
33 #include <asm/unified.h>
34 #include <asm/cpu.h>
35 #include <asm/cputype.h>
36 #include <asm/elf.h>
37 #include <asm/procinfo.h>
38 #include <asm/sections.h>
39 #include <asm/setup.h>
40 #include <asm/smp_plat.h>
41 #include <asm/mach-types.h>
42 #include <asm/cacheflush.h>
43 #include <asm/cachetype.h>
44 #include <asm/tlbflush.h>
45
46 #include <asm/prom.h>
47 #include <asm/mach/arch.h>
48 #include <asm/mach/irq.h>
49 #include <asm/mach/time.h>
50 #include <asm/traps.h>
51 #include <asm/unwind.h>
52
53 #if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
54 #include "compat.h"
55 #endif
56 #include "atags.h"
57 #include "tcm.h"
58
59 #ifndef MEM_SIZE
60 #define MEM_SIZE        (16*1024*1024)
61 #endif
62
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Emulator name requested via "fpe=" (at most 8 chars, may lack a NUL). */
char fpe_type[8];

/*
 * Record the "fpe=" kernel parameter for the FP emulator.
 *
 * Use a bounded string copy rather than memcpy(): memcpy always read
 * 8 bytes, running past the end of an argument shorter than 8 chars.
 * strncpy() stops at the argument's NUL and zero-fills the remainder;
 * as before, an exactly-8-char argument leaves fpe_type unterminated.
 */
static int __init fpe_setup(char *line)
{
	strncpy(fpe_type, line, sizeof(fpe_type));
	return 1;
}

__setup("fpe=", fpe_setup);
#endif
74
/* Implemented in arch/arm/mm and arch/arm/kernel respectively. */
extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

/* Raw value of the CPU ID register, cached by head.S at boot. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine number passed by the bootloader (r1). */
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
/* CACHEID_* flags describing the cache architecture; set in cacheid_init(). */
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

/* Physical address of the ATAG list, saved by head.S (0 if none). */
unsigned int __atags_pointer __initdata;

/* Board revision / serial number from ATAGs, shown in /proc/cpuinfo. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* HWCAP_* bits exported to userspace via the ELF auxiliary vector. */
unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


/*
 * Per-class function tables, filled in from the matched proc_info_list
 * when the kernel supports more than one CPU/TLB/user/cache variant.
 */
#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Small per-CPU stacks for the re-entrant exception modes; three words
 * each, set up by cpu_init().
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

/* ELF platform string (e.g. "v7l"), exported for the aux vector. */
char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
/* Writable copy of the command line handed to init/main.c. */
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/* Runtime endianness probe: reads 'l' on little-endian, 'b' on big-endian. */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
138
/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		/* start/end filled in from mdesc->video_start/video_end. */
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		/* start/end filled in from _text/_etext at boot. */
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		/* start/end filled in from _sdata/_end at boot. */
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/* Legacy PC-style parallel port I/O ranges, reserved on request. */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]
191
/*
 * Architecture name strings indexed by the CPU_ARCH_* value returned
 * from cpu_architecture(); used for /proc/cpuinfo and the boot banner.
 */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};
211
212 int cpu_architecture(void)
213 {
214         int cpu_arch;
215
216         if ((read_cpuid_id() & 0x0008f000) == 0) {
217                 cpu_arch = CPU_ARCH_UNKNOWN;
218         } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
219                 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
220         } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
221                 cpu_arch = (read_cpuid_id() >> 16) & 7;
222                 if (cpu_arch)
223                         cpu_arch += CPU_ARCH_ARMv3;
224         } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
225                 unsigned int mmfr0;
226
227                 /* Revised CPUID format. Read the Memory Model Feature
228                  * Register 0 and check for VMSAv7 or PMSAv7 */
229                 asm("mrc        p15, 0, %0, c0, c1, 4"
230                     : "=r" (mmfr0));
231                 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
232                     (mmfr0 & 0x000000f0) >= 0x00000030)
233                         cpu_arch = CPU_ARCH_ARMv7;
234                 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
235                          (mmfr0 & 0x000000f0) == 0x00000020)
236                         cpu_arch = CPU_ARCH_ARMv6;
237                 else
238                         cpu_arch = CPU_ARCH_UNKNOWN;
239         } else
240                 cpu_arch = CPU_ARCH_UNKNOWN;
241
242         return cpu_arch;
243 }
244
/*
 * Decide whether the instruction cache can alias: true when one way of
 * the I-cache spans more than a page, so different virtual aliases of a
 * page may live in distinct cache lines.
 */
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		/* Select the L1 I-cache in CSSELR, then read its CCSIDR. */
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		/* CCSIDR encodes (line size - 2) log2 words and (sets - 1). */
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		/* ARMv6 cache type register: bit 11 flags an aliasing I-cache. */
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
273
/*
 * Classify the cache architecture (VIVT / VIPT aliasing / VIPT
 * non-aliasing, plus I-cache variants) into the global 'cacheid',
 * which the cache-maintenance macros test at runtime.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			/* L1Ip field == 0b01 means ASID-tagged VIVT I-cache. */
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
			else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		} else if (cachetype & (1 << 23)) {
			/* ARMv6 format: bit 23 set means an aliasing D-cache. */
			cacheid = CACHEID_VIPT_ALIASING;
		} else {
			cacheid = CACHEID_VIPT_NONALIASING;
			if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6))
				cacheid |= CACHEID_VIPT_I_ALIASING;
		}
	} else {
		/* Everything before ARMv6 has virtually-tagged caches. */
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}
307
308 /*
309  * These functions re-use the assembly code in head.S, which
310  * already provide the required functionality.
311  */
312 extern struct proc_info_list *lookup_processor_type(unsigned int);
313
314 void __init early_print(const char *str, ...)
315 {
316         extern void printascii(const char *);
317         char buf[256];
318         va_list ap;
319
320         va_start(ap, str);
321         vsnprintf(buf, sizeof(buf), str, ap);
322         va_end(ap);
323
324 #ifdef CONFIG_DEBUG_LL
325         printascii(buf);
326 #endif
327         printk("%s", buf);
328 }
329
330 static void __init feat_v6_fixup(void)
331 {
332         int id = read_cpuid_id();
333
334         if ((id & 0xff0f0000) != 0x41070000)
335                 return;
336
337         /*
338          * HWCAP_TLS is available only on 1136 r1p0 and later,
339          * see also kuser_get_tls_init.
340          */
341         if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
342                 elf_hwcap &= ~HWCAP_TLS;
343 }
344
/*
 * Identify the CPU, install its per-class function tables, and derive
 * the ELF hwcaps and platform strings. Halts if the CPU is unsupported.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		/* Nothing sensible to do without processor support: spin. */
		while (1);
	}

	cpu_name = list->cpu_name;

	/* Copy the matched function tables for multi-variant kernels. */
#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	/* e.g. "armv7l"/"armv7b": arch name plus runtime endianness. */
	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}
392
/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 *
	 * For each of IRQ, ABT and UND mode: switch into the mode with
	 * interrupts masked, point sp at the top of that mode's small
	 * per-CPU stack, then return to SVC mode.  r14 is clobbered as
	 * the scratch register.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}
443
444 void __init dump_machine_table(void)
445 {
446         struct machine_desc *p;
447
448         early_print("Available machine support:\n\nID (hex)\tNAME\n");
449         for_each_machine_desc(p)
450                 early_print("%08x\t%s\n", p->nr, p->name);
451
452         early_print("\nPlease check your kernel config and/or bootloader.\n");
453
454         while (true)
455                 /* can't use cpu_relax() here as it may require MMU setup */;
456 }
457
/*
 * Register a physical memory region as the next meminfo bank.
 * Returns 0 on success, -EINVAL if the bank table is full or the
 * region rounds down to nothing.
 */
int __init arm_add_memory(phys_addr_t start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at 0x%08llx\n", (long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	/* Bank only counts once it is fully initialised. */
	meminfo.nr_banks++;
	return 0;
}
486
487 /*
488  * Pick out the memory size.  We look for mem=size@start,
489  * where start and size are "size[KkMm]"
490  */
491 static int __init early_mem(char *p)
492 {
493         static int usermem __initdata = 0;
494         unsigned long size;
495         phys_addr_t start;
496         char *endp;
497
498         /*
499          * If the user specifies memory size, we
500          * blow away any automatically generated
501          * size.
502          */
503         if (usermem == 0) {
504                 usermem = 1;
505                 meminfo.nr_banks = 0;
506         }
507
508         start = PHYS_OFFSET;
509         size  = memparse(p, &endp);
510         if (*endp == '@')
511                 start = memparse(endp + 1, NULL);
512
513         arm_add_memory(start, size);
514
515         return 0;
516 }
517 early_param("mem", early_mem);
518
519 static void __init
520 setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
521 {
522 #ifdef CONFIG_BLK_DEV_RAM
523         extern int rd_size, rd_image_start, rd_prompt, rd_doload;
524
525         rd_image_start = image_start;
526         rd_prompt = prompt;
527         rd_doload = doload;
528
529         if (rd_sz)
530                 rd_size = rd_sz;
531 #endif
532 }
533
/*
 * Register the standard resource tree: System RAM regions from
 * memblock, the kernel text/data spans nested beneath them, optional
 * video RAM, and the legacy parallel-port I/O ranges.
 */
static void __init request_standard_resources(struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		/* Nest kernel text/data inside the RAM bank containing them. */
		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}
578
579 /*
580  *  Tag parsing.
581  *
582  * This is the new way of passing data to the kernel at boot time.  Rather
583  * than passing a fixed inflexible structure to the kernel, we pass a list
584  * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
585  * tag for the list to be recognised (to distinguish the tagged list from
586  * a param_struct).  The list is terminated with a zero-length tag (this tag
587  * is not parsed in any way).
588  */
589 static int __init parse_tag_core(const struct tag *tag)
590 {
591         if (tag->hdr.size > 2) {
592                 if ((tag->u.core.flags & 1) == 0)
593                         root_mountflags &= ~MS_RDONLY;
594                 ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
595         }
596         return 0;
597 }
598
599 __tagtable(ATAG_CORE, parse_tag_core);
600
601 static int __init parse_tag_mem32(const struct tag *tag)
602 {
603         return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
604 }
605
606 __tagtable(ATAG_MEM, parse_tag_mem32);
607
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
/* Default text-console geometry, overridden by ATAG_VIDEOTEXT below. */
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* Copy the bootloader-described text console state into screen_info. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif
634
635 static int __init parse_tag_ramdisk(const struct tag *tag)
636 {
637         setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
638                       (tag->u.ramdisk.flags & 2) == 0,
639                       tag->u.ramdisk.start, tag->u.ramdisk.size);
640         return 0;
641 }
642
643 __tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
644
/* Record the 64-bit board serial number (reported in /proc/cpuinfo). */
static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);
653
/* Record the board revision (reported in /proc/cpuinfo). */
static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);
661
/*
 * Merge the bootloader command line into default_command_line.
 * Behaviour depends on config: append (CMDLINE_EXTEND), ignore
 * (CMDLINE_FORCE), or replace (default).
 */
static int __init parse_tag_cmdline(const struct tag *tag)
{
#if defined(CONFIG_CMDLINE_EXTEND)
	strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
	strlcat(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
#else
	strlcpy(default_command_line, tag->u.cmdline.cmdline,
		COMMAND_LINE_SIZE);
#endif
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
678
679 /*
680  * Scan the tag table for this tag, and call its parse function.
681  * The tag table is built by the linker from all the __tagtable
682  * declarations.
683  */
684 static int __init parse_tag(const struct tag *tag)
685 {
686         extern struct tagtable __tagtable_begin, __tagtable_end;
687         struct tagtable *t;
688
689         for (t = &__tagtable_begin; t < &__tagtable_end; t++)
690                 if (tag->hdr.tag == t->tag) {
691                         t->parse(tag);
692                         break;
693                 }
694
695         return t < &__tagtable_end;
696 }
697
698 /*
699  * Parse all tags in the list, checking both the global and architecture
700  * specific tag tables.
701  */
702 static void __init parse_tags(const struct tag *t)
703 {
704         for (; t->hdr.size; t = tag_next(t))
705                 if (!parse_tag(t))
706                         printk(KERN_WARNING
707                                 "Ignoring unrecognised tag 0x%08x\n",
708                                 t->hdr.tag);
709 }
710
/*
 * This holds our defaults.
 *
 * A minimal valid tag list: an ATAG_CORE header, one ATAG_MEM bank of
 * MEM_SIZE at an address filled in later (PHYS_OFFSET), and the
 * zero-length terminator.  Used when the bootloader provides nothing.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE },
	{ 0, ATAG_NONE }
};
727
728 static int __init customize_machine(void)
729 {
730         /* customizes platform devices, or adds new ones */
731         if (machine_desc->init_machine)
732                 machine_desc->init_machine();
733         return 0;
734 }
735 arch_initcall(customize_machine);
736
737 #ifdef CONFIG_KEXEC
738 static inline unsigned long long get_total_mem(void)
739 {
740         unsigned long total;
741
742         total = max_low_pfn - min_low_pfn;
743         return total << PAGE_SHIFT;
744 }
745
/**
 * reserve_crashkernel() - reserves memory are for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* Nonzero means no (or malformed) crashkernel= option: nothing to do. */
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	/* Publish the reserved range so it shows up in /proc/iomem. */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
782 #else
783 static inline void reserve_crashkernel(void) {}
784 #endif /* CONFIG_KEXEC */
785
786 static void __init squash_mem_tags(struct tag *tag)
787 {
788         for (; tag->hdr.size; tag = tag_next(tag))
789                 if (tag->hdr.tag == ATAG_MEM)
790                         tag->hdr.tag = ATAG_NONE;
791 }
792
/*
 * Locate the machine descriptor for the bootloader-supplied machine
 * number, find the best available ATAG list (bootloader-provided,
 * machine default, or built-in fallback), and parse it.  Does not
 * return if the machine number is unknown.
 */
static struct machine_desc * __init setup_machine_tags(unsigned int nr)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc = NULL, *p;
	char *from = default_command_line;

	/* Complete the built-in fallback tag list with the RAM base. */
	init_tags.mem.start = PHYS_OFFSET;

	/*
	 * locate machine in the list of supported machines.
	 */
	for_each_machine_desc(p)
		if (nr == p->nr) {
			printk("Machine: %s\n", p->name);
			mdesc = p;
			break;
		}

	if (!mdesc) {
		early_print("\nError: unrecognized/unsupported machine ID"
			" (r1 = 0x%08x).\n\n", nr);
		dump_machine_table(); /* does not return */
	}

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params) {
#ifdef CONFIG_MMU
		/*
		 * We still are executing with a minimal MMU mapping created
		 * with the presumption that the machine default for this
		 * is located in the first MB of RAM.  Anything else will
		 * fault and silently hang the kernel at this point.
		 */
		if (mdesc->boot_params < PHYS_OFFSET ||
		    mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
			printk(KERN_WARNING
			       "Default boot params at physical 0x%08lx out of reach\n",
			       mdesc->boot_params);
		} else
#endif
		{
			tags = phys_to_virt(mdesc->boot_params);
		}
	}

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif

	if (tags->hdr.tag != ATAG_CORE) {
#if defined(CONFIG_OF)
		/*
		 * If CONFIG_OF is set, then assume this is a reasonably
		 * modern system that should pass boot parameters
		 */
		early_print("Warning: Neither atags nor dtb found\n");
#endif
		/* Fall back to the minimal built-in defaults. */
		tags = (struct tag *)&init_tags;
	}

	/* Give the machine a chance to rewrite tags/cmdline/memory layout. */
	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		/* Banks set up by fixup() win over ATAG_MEM entries. */
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	return mdesc;
}
874
875
/*
 * Main ARM architecture setup entry point, called from init/main.c.
 * Ordering matters throughout: CPU identification, machine/tag
 * discovery, command-line handling, memory and paging init, then
 * the remaining per-machine hooks.
 */
void __init setup_arch(char **cmdline_p)
{
	struct machine_desc *mdesc;

	unwind_init();

	setup_processor();
	/* Prefer a flattened device tree; fall back to ATAG parsing. */
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	unflatten_device_tree();

#ifdef CONFIG_SMP
	if (is_smp())
		smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();
	tcm_init();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();

	if (mdesc->init_early)
		mdesc->init_early();
}
935
936
937 static int __init topology_init(void)
938 {
939         int cpu;
940
941         for_each_possible_cpu(cpu) {
942                 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
943                 cpuinfo->cpu.hotpluggable = 1;
944                 register_cpu(&cpuinfo->cpu, cpu);
945         }
946
947         return 0;
948 }
949 subsys_initcall(topology_init);
950
#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory; entries beneath it are added elsewhere. */
static int __init proc_cpu_init(void)
{
	if (proc_mkdir("cpu", NULL) == NULL)
		return -ENOMEM;

	return 0;
}
fs_initcall(proc_cpu_init);
#endif
963
/*
 * Names for the HWCAP_* bits, in bit order: entry i is printed in
 * /proc/cpuinfo when elf_hwcap bit i is set.  NULL-terminated.
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};
982
/*
 * seq_file show callback for /proc/cpuinfo.  The output format is
 * userspace ABI (glibc parses the "processor" lines) — do not change
 * the strings.
 */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* The ID register layout differs by CPU generation; see cpu_architecture(). */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}
1045
1046 static void *c_start(struct seq_file *m, loff_t *pos)
1047 {
1048         return *pos < 1 ? (void *)1 : NULL;
1049 }
1050
1051 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1052 {
1053         ++*pos;
1054         return NULL;
1055 }
1056
/* seq_file stop: nothing to release. */
static void c_stop(struct seq_file *m, void *v)
{
}

/* seq_file operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};