/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>

#include <asm/mach-types.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
        unsigned long start, size;
        char *endp;

        start = memparse(p, &endp);
        if (*endp == ',') {
                size = memparse(endp + 1, NULL);

                phys_initrd_start = start;
                phys_initrd_size = size;
        }
        return 0;
}
early_param("initrd", early_initrd);
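
/*
 * Example (hypothetical values): booting with "initrd=0x60800000,8M"
 * on the kernel command line makes memparse() above record
 * phys_initrd_start = 0x60800000 and phys_initrd_size = 0x800000;
 * the region is validated and reserved later in arm_memblock_init().
 */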

static int __init parse_tag_initrd(const struct tag *tag)
{
        printk(KERN_WARNING "ATAG_INITRD is deprecated; "
                "please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
{
        phys_initrd_start = start;
        phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */
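
/*
 * Of the three initrd sources above, note the addressing difference:
 * ATAG_INITRD carries a virtual address (hence the __virt_to_phys()
 * conversion), while ATAG_INITRD2 and the flattened device tree's
 * "linux,initrd-start"/"linux,initrd-end" properties carry physical
 * addresses directly.
 */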

/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

void show_mem(unsigned int filter)
{
        int free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0, slab = 0, i;
        struct meminfo *mi = &meminfo;

        printk("Mem-info:\n");
        show_free_areas(filter);

        for_each_bank (i, mi) {
                struct membank *bank = &mi->bank[i];
                unsigned int pfn1, pfn2;
                struct page *page, *end;

                pfn1 = bank_pfn_start(bank);
                pfn2 = bank_pfn_end(bank);

                page = pfn_to_page(pfn1);
                end  = pfn_to_page(pfn2 - 1) + 1;

                do {
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (PageSlab(page))
                                slab++;
                        else if (!page_count(page))
                                free++;
                        else
                                shared += page_count(page) - 1;
                        page++;
                } while (page < end);
        }

        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d slab pages\n", slab);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

static void __init find_limits(unsigned long *min, unsigned long *max_low,
        unsigned long *max_high)
{
        struct meminfo *mi = &meminfo;
        int i;

        *min = -1UL;
        *max_low = *max_high = 0;

        for_each_bank (i, mi) {
                struct membank *bank = &mi->bank[i];
                unsigned long start, end;

                start = bank_pfn_start(bank);
                end = bank_pfn_end(bank);

                if (*min > start)
                        *min = start;
                if (*max_high < end)
                        *max_high = end;
                if (bank->highmem)
                        continue;
                if (*max_low < end)
                        *max_low = end;
        }
}
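
/*
 * Worked example (hypothetical banks): with a lowmem bank spanning
 * PFNs 0x60000-0x70000 and a highmem bank spanning 0x70000-0x80000,
 * find_limits() yields min = 0x60000, max_low = 0x70000 and
 * max_high = 0x80000.
 */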

static void __init arm_bootmem_init(unsigned long start_pfn,
        unsigned long end_pfn)
{
        struct memblock_region *reg;
        unsigned int boot_pages;
        phys_addr_t bitmap;
        pg_data_t *pgdat;

        /*
         * Allocate the bootmem bitmap page.  This must be in a region
         * of memory which has already been mapped.
         */
        boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
        bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
                                __pfn_to_phys(end_pfn));

        /*
         * Initialise the bootmem allocator, handing the
         * memory banks over to bootmem.
         */
        node_set_online(0);
        pgdat = NODE_DATA(0);
        init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

        /* Free the lowmem regions from memblock into bootmem. */
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (end >= end_pfn)
                        end = end_pfn;
                if (start >= end)
                        break;

                free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
        }

        /* Reserve the lowmem memblock reserved regions in bootmem. */
        for_each_memblock(reserved, reg) {
                unsigned long start = memblock_region_reserved_base_pfn(reg);
                unsigned long end = memblock_region_reserved_end_pfn(reg);

                if (end >= end_pfn)
                        end = end_pfn;
                if (start >= end)
                        break;

                reserve_bootmem(__pfn_to_phys(start),
                                (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
        }
}
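
/*
 * In short: bootmem tracks free lowmem with one bit per page, so the
 * bitmap itself needs bootmem_bootmap_pages(end_pfn - start_pfn)
 * pages taken from already-mapped memory below end_pfn.  Everything
 * memblock lists as memory is then marked free, and everything
 * memblock has reserved (kernel image, initrd, page tables, ...) is
 * marked reserved again, so the two allocators agree.
 */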

#ifdef CONFIG_ZONE_DMA

unsigned long arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
u32 arm_dma_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
{
        if (size[0] <= dma_size)
                return;

        size[ZONE_NORMAL] = size[0] - dma_size;
        size[ZONE_DMA] = dma_size;
        hole[ZONE_NORMAL] = hole[0];
        hole[ZONE_DMA] = 0;
}
#endif
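
/*
 * Example (hypothetical sizes): with arm_dma_zone_size = SZ_64M and
 * 256MB of lowmem, arm_adjust_dma_zone() leaves the first 64MB worth
 * of pages in ZONE_DMA and moves the remaining 192MB to ZONE_NORMAL;
 * arm_dma_limit is then set (in arm_bootmem_free() below) to
 * PHYS_OFFSET + SZ_64M - 1, the highest bus address a GFP_DMA
 * allocation may return.
 */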

static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
        unsigned long max_high)
{
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        struct memblock_region *reg;

        /*
         * initialise the zones.
         */
        memset(zone_size, 0, sizeof(zone_size));

        /*
         * The memory size has already been determined.  If we need
         * to do anything fancy with the allocation of this memory
         * to the zones, now is the time to do it.
         */
        zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
        zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

        /*
         * Calculate the size of the holes.
         *  holes = node_size - sum(bank_sizes)
         */
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start < max_low) {
                        unsigned long low_end = min(end, max_low);
                        zhole_size[0] -= low_end - start;
                }
#ifdef CONFIG_HIGHMEM
                if (end > max_low) {
                        unsigned long high_start = max(start, max_low);
                        zhole_size[ZONE_HIGHMEM] -= end - high_start;
                }
#endif
        }

#ifdef CONFIG_ZONE_DMA
        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
        if (arm_dma_zone_size) {
                arm_adjust_dma_zone(zone_size, zhole_size,
                        arm_dma_zone_size >> PAGE_SHIFT);
                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
        } else
                arm_dma_limit = 0xffffffff;
#endif

        free_area_init_node(0, zone_size, min, zhole_size);
}
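
/*
 * Hole accounting example (hypothetical): if lowmem spans PFNs
 * 0x60000-0x70000 but memblock only reports memory at
 * 0x60000-0x68000, the missing 0x8000 pages remain counted in
 * zhole_size[0], and free_area_init_node() subtracts them from the
 * zone's present page count.
 */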

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
        return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void arm_memory_present(void)
{
}
#else
static void arm_memory_present(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                memory_present(0, memblock_region_memory_base_pfn(reg),
                               memblock_region_memory_end_pfn(reg));
}
#endif

static int __init meminfo_cmp(const void *_a, const void *_b)
{
        const struct membank *a = _a, *b = _b;
        long cmp = bank_pfn_start(a) - bank_pfn_start(b);
        return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
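
/*
 * meminfo_cmp() is a plain three-way comparator: sort() in
 * arm_memblock_init() below uses it to order the banks by ascending
 * start PFN, which free_unused_memmap() later depends on.
 */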

void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
        int i;

        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

        memblock_init();
        for (i = 0; i < mi->nr_banks; i++)
                memblock_add(mi->bank[i].start, mi->bank[i].size);

        /* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
        memblock_reserve(__pa(_sdata), _end - _sdata);
#else
        memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
        if (phys_initrd_size &&
            !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
                phys_initrd_start = phys_initrd_size = 0;
        }
        if (phys_initrd_size &&
            memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
                phys_initrd_start = phys_initrd_size = 0;
        }
        if (phys_initrd_size) {
                memblock_reserve(phys_initrd_start, phys_initrd_size);

                /* Now convert initrd to virtual addresses */
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        }
#endif

        arm_mm_memblock_reserve();
        arm_dt_memblock_reserve();

        /* reserve any platform specific memblock areas */
        if (mdesc->reserve)
                mdesc->reserve();

        memblock_analyze();
        memblock_dump_all();
}
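
/*
 * All early reservations, including the platform's mdesc->reserve()
 * hook, are gathered here so that arm_bootmem_init() later sees a
 * complete picture of reserved memory; memblock_analyze() then
 * recomputes the region totals before memblock_dump_all() prints the
 * final layout (when booted with memblock=debug).
 */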

void __init bootmem_init(void)
{
        unsigned long min, max_low, max_high;

        max_low = max_high = 0;

        find_limits(&min, &max_low, &max_high);

        arm_bootmem_init(min, max_low);

        /*
         * Sparsemem tries to allocate bootmem in memory_present(),
         * so this must be done after the fixed reservations.
         */
        arm_memory_present();

        /*
         * sparse_init() needs the bootmem allocator up and running.
         */
        sparse_init();

        /*
         * Now free the memory - free_area_init_node needs
         * the sparse mem_map arrays initialized by sparse_init()
         * for memmap_init_zone(), otherwise all PFNs are invalid.
         */
        arm_bootmem_free(min, max_low, max_high);

        high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;

        /*
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
         *
         * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
         * the system, not the maximum PFN.
         */
        max_low_pfn = max_low - PHYS_PFN_OFFSET;
        max_pfn = max_high - PHYS_PFN_OFFSET;
}
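
/*
 * A note on the high_memory computation above: max_low is a PFN, so
 * (max_low << PAGE_SHIFT) is the first byte beyond lowmem.  __va() is
 * applied to that address minus one, and one is added back to the
 * resulting pointer, keeping the __va() argument within the lowmem
 * mapping even when lowmem ends exactly at the top of the direct map.
 */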

static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
        unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

        for (; pfn < end; pfn++) {
                struct page *page = pfn_to_page(pfn);
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
                pages++;
        }

        if (size && s)
                printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

        return pages;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
        u32 *p = (u32 *)s;
        for (; count != 0; count -= 4)
                *p++ = 0xe7fddef0;
}
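
/*
 * Two details worth noting: the loop assumes 'count' is a multiple of
 * four (callers pass word-aligned linker-section sizes), and the
 * poison value is chosen so that, read as Thumb halfwords, the low
 * half 0xdef0 is an undefined instruction and the high half 0xe7fd
 * branches back to it, while the whole word is undefined in ARM state.
 */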

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
        pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                free_bootmem(pg, pgend - pg);
}
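
/*
 * The inward rounding above is deliberate: only whole pages of the
 * memmap array can be returned to bootmem, so a partial page of
 * struct page entries at either edge of the span is simply kept.
 */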

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * This relies on each bank being in address order.
         * The banks are sorted previously in arm_memblock_init().
         */
        for_each_bank(i, mi) {
                struct membank *bank = &mi->bank[i];

                bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist
                 * due to SPARSEMEM sections which aren't present.
                 */
                bank_start = min(bank_start,
                                 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
                /*
                 * Align down here since the VM subsystem insists that the
                 * memmap entries are valid from the bank start aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end < bank_start)
                        free_memmap(prev_bank_end, bank_start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
                free_memmap(prev_bank_end,
                            ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}
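
/*
 * Example (hypothetical banks): with bank 0 covering PFNs
 * 0x00000-0x10000 and bank 1 covering 0x20000-0x30000, the struct
 * page entries for the hole in between serve no purpose; after the
 * MAX_ORDER_NR_PAGES (or SPARSEMEM section) rounding above, that
 * slice of mem_map is handed back to bootmem via free_memmap().
 */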

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
        struct memblock_region *mem, *res;

        /* set highmem page free */
        for_each_memblock(memory, mem) {
                unsigned long start = memblock_region_memory_base_pfn(mem);
                unsigned long end = memblock_region_memory_end_pfn(mem);

                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;

                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;

                /* Find and exclude any reserved regions */
                for_each_memblock(reserved, res) {
                        unsigned long res_start, res_end;

                        res_start = memblock_region_reserved_base_pfn(res);
                        res_end = memblock_region_reserved_end_pfn(res);

                        if (res_end < start)
                                continue;
                        if (res_start < start)
                                res_start = start;
                        if (res_start > end)
                                res_start = end;
                        if (res_end > end)
                                res_end = end;
                        if (res_start != start)
                                totalhigh_pages += free_area(start, res_start,
                                                             NULL);
                        start = res_end;
                        if (start == end)
                                break;
                }

                /* And now free anything which remains */
                if (start < end)
                        totalhigh_pages += free_area(start, end, NULL);
        }
        totalram_pages += totalhigh_pages;
#endif
}
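
/*
 * The nested loop above amounts to an interval subtraction: each
 * highmem region is clipped against the (address-ordered) reserved
 * regions, and only the gaps between reservations are released with
 * free_area(), accumulating into totalhigh_pages.
 */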

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
        unsigned long reserved_pages, free_pages;
        struct memblock_region *reg;
        int i;
#ifdef CONFIG_HAVE_TCM
        /* These pointers are filled in on TCM detection */
        extern u32 dtcm_end;
        extern u32 itcm_end;
#endif

        max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

        /* this will put all unused low memory onto the freelists */
        free_unused_memmap(&meminfo);

        totalram_pages += free_all_bootmem();

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        totalram_pages += free_area(PHYS_PFN_OFFSET,
                                    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

        free_highpages();

        reserved_pages = free_pages = 0;

        for_each_bank(i, &meminfo) {
                struct membank *bank = &meminfo.bank[i];
                unsigned int pfn1, pfn2;
                struct page *page, *end;

                pfn1 = bank_pfn_start(bank);
                pfn2 = bank_pfn_end(bank);

                page = pfn_to_page(pfn1);
                end  = pfn_to_page(pfn2 - 1) + 1;

                do {
                        if (PageReserved(page))
                                reserved_pages++;
                        else if (!page_count(page))
                                free_pages++;
                        page++;
                } while (page < end);
        }

        /*
         * Since our memory may not be contiguous, calculate the
         * real number of pages we have in this system
         */
        printk(KERN_INFO "Memory:");
        num_physpages = 0;
        for_each_memblock(memory, reg) {
                unsigned long pages = memblock_region_memory_end_pfn(reg) -
                        memblock_region_memory_base_pfn(reg);
                num_physpages += pages;
                printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
        }
        printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

        printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                free_pages << (PAGE_SHIFT-10),
                reserved_pages << (PAGE_SHIFT-10),
                totalhigh_pages << (PAGE_SHIFT-10));

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

        printk(KERN_NOTICE "Virtual kernel memory layout:\n"
                        "    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
                        "    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                        "    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                        "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                        "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
                        "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
                        "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

                        MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
                                (PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
                        MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
                        MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
                        MLK(FIXADDR_START, FIXADDR_TOP),
                        MLM(VMALLOC_START, VMALLOC_END),
                        MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
                        MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
                                (PAGE_SIZE)),
#endif
                        MLM(MODULES_VADDR, MODULES_END),

                        MLK_ROUNDUP(_text, _etext),
                        MLK_ROUNDUP(__init_begin, __init_end),
                        MLK_ROUNDUP(_sdata, _edata),
                        MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#ifdef CONFIG_MMU
        BUILD_BUG_ON(TASK_SIZE                          > MODULES_VADDR);
        BUG_ON(TASK_SIZE                                > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
        BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE      > PAGE_OFFSET);
#endif

        if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get
                 * anywhere without overcommit, so turn
                 * it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}
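
/*
 * The MLK/MLM/MLK_ROUNDUP macros used above are pure formatting
 * helpers: each expands one (base, top) pair into the three printk
 * arguments "base, top, size", with the size scaled to kB or MB,
 * which is why they are #undef'd immediately after the printk.
 */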

void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
        extern char __tcm_start, __tcm_end;

        poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
        totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
                                    __phys_to_pfn(__pa(&__tcm_end)),
                                    "TCM link");
#endif

        poison_init_mem(__init_begin, __init_end - __init_begin);
        if (!machine_is_integrator() && !machine_is_cintegrator())
                totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
                                            __phys_to_pfn(__pa(__init_end)),
                                            "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd) {
                poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
                totalram_pages += free_area(__phys_to_pfn(__pa(start)),
                                            __phys_to_pfn(__pa(end)),
                                            "initrd");
        }
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
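
/*
 * Usage note: booting with "keepinitrd" on the kernel command line
 * sets keep_initrd and stops free_initrd_mem() from poisoning and
 * releasing the initrd pages, which is useful when the initramfs
 * contents must remain accessible after boot.
 */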
#endif