/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>

#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"
static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;
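/*
 * Handle the "initrd=<start>,<size>" kernel command line parameter by
 * recording the physical start address and size of the initrd so it can
 * be reserved later in arm_memblock_init().
 */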
static int __init early_initrd(char *p)
{
        unsigned long start, size;
        char *endp;

        start = memparse(p, &endp);
        if (*endp == ',') {
                size = memparse(endp + 1, NULL);

                phys_initrd_start = start;
                phys_initrd_size = size;
        }
        return 0;
}
early_param("initrd", early_initrd);
static int __init parse_tag_initrd(const struct tag *tag)
{
        printk(KERN_WARNING "ATAG_INITRD is deprecated; "
                "please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);
static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
{
        phys_initrd_start = start;
        phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */
/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;
void show_mem(unsigned int filter)
{
        int free = 0, total = 0, reserved = 0;
        int shared = 0, cached = 0, slab = 0, i;
        struct meminfo * mi = &meminfo;

        printk("Mem-info:\n");
        show_free_areas(filter);

        if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
                return;

        for_each_bank (i, mi) {
                struct membank *bank = &mi->bank[i];
                unsigned int pfn1, pfn2;
                struct page *page, *end;

                pfn1 = bank_pfn_start(bank);
                pfn2 = bank_pfn_end(bank);

                page = pfn_to_page(pfn1);
                end  = pfn_to_page(pfn2 - 1) + 1;

                do {
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (PageSlab(page))
                                slab++;
                        else if (!page_count(page))
                                free++;
                        else
                                shared += page_count(page) - 1;
                        page++;
                } while (page < end);
        }

        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d slab pages\n", slab);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}
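/*
 * Compute the lowest memory PFN, the end of lowmem and the end of highmem
 * from the (sorted) meminfo bank array.
 */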
static void __init find_limits(unsigned long *min, unsigned long *max_low,
        unsigned long *max_high)
{
        struct meminfo *mi = &meminfo;
        int i;

        /* This assumes the meminfo array is properly sorted */
        *min = bank_pfn_start(&mi->bank[0]);
        for_each_bank (i, mi)
                if (mi->bank[i].highmem)
                        break;
        *max_low = bank_pfn_end(&mi->bank[i - 1]);
        *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}
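/*
 * Set up the bootmem allocator for the lowmem range [start_pfn, end_pfn):
 * allocate the bootmem bitmap from memblock, hand the lowmem memblock
 * regions to bootmem as free pages, and re-reserve anything memblock has
 * already reserved.
 */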
static void __init arm_bootmem_init(unsigned long start_pfn,
        unsigned long end_pfn)
{
        struct memblock_region *reg;
        unsigned int boot_pages;
        phys_addr_t bitmap;
        pg_data_t *pgdat;

        /*
         * Allocate the bootmem bitmap page.  This must be in a region
         * of memory which has already been mapped.
         */
        boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
        bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
                                __pfn_to_phys(end_pfn));

        /*
         * Initialise the bootmem allocator, handing the
         * memory banks over to bootmem.
         */
        node_set_online(0);
        pgdat = NODE_DATA(0);
        init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

        /* Free the lowmem regions from memblock into bootmem. */
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (end >= end_pfn)
                        end = end_pfn;
                if (start >= end)
                        break;

                free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
        }

        /* Reserve the lowmem memblock reserved regions in bootmem. */
        for_each_memblock(reserved, reg) {
                unsigned long start = memblock_region_reserved_base_pfn(reg);
                unsigned long end = memblock_region_reserved_end_pfn(reg);

                if (end >= end_pfn)
                        end = end_pfn;
                if (start >= end)
                        break;

                reserve_bootmem(__pfn_to_phys(start),
                                (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
        }
}
#ifdef CONFIG_ZONE_DMA

unsigned long arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
{
        if (size[0] <= dma_size)
                return;

        size[ZONE_NORMAL] = size[0] - dma_size;
        size[ZONE_DMA] = dma_size;
        hole[ZONE_NORMAL] = hole[0];
        hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
        if (mdesc->dma_zone_size) {
                arm_dma_zone_size = mdesc->dma_zone_size;
                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
        } else
                arm_dma_limit = 0xffffffff;
#endif
}
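/*
 * Hand the zone layout over to the core VM: zone_size[] holds the span of
 * each zone in pages and zhole_size[] the pages in that span not backed by
 * real memory.
 */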
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
        unsigned long max_high)
{
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        struct memblock_region *reg;

        /*
         * initialise the zones.
         */
        memset(zone_size, 0, sizeof(zone_size));

        /*
         * The memory size has already been determined.  If we need
         * to do anything fancy with the allocation of this memory
         * to the zones, now is the time to do it.
         */
        zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
        zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

        /*
         * Calculate the size of the holes.
         *  holes = node_size - sum(bank_sizes)
         */
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start < max_low) {
                        unsigned long low_end = min(end, max_low);
                        zhole_size[0] -= low_end - start;
                }
#ifdef CONFIG_HIGHMEM
                if (end > max_low) {
                        unsigned long high_start = max(start, max_low);
                        zhole_size[ZONE_HIGHMEM] -= end - high_start;
                }
#endif
        }

#ifdef CONFIG_ZONE_DMA
        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
        if (arm_dma_zone_size)
                arm_adjust_dma_zone(zone_size, zhole_size,
                        arm_dma_zone_size >> PAGE_SHIFT);
#endif

        free_area_init_node(0, zone_size, min, zhole_size);
}
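/*
 * With CONFIG_HAVE_ARCH_PFN_VALID a PFN is valid only if it falls inside a
 * memblock memory region, which handles holes in the physical memory map.
 */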
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
        return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif
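/*
 * Tell the sparsemem code which sections of physical memory are present;
 * this is a no-op when CONFIG_SPARSEMEM is not enabled.
 */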
#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg)
                memory_present(0, memblock_region_memory_base_pfn(reg),
                               memblock_region_memory_end_pfn(reg));
}
#endif
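/*
 * Populate memblock from the meminfo banks and reserve everything that must
 * survive early boot: the kernel image, the initrd, early page tables, the
 * device tree and any machine-specific regions.
 */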
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
        int i;

        for (i = 0; i < mi->nr_banks; i++)
                memblock_add(mi->bank[i].start, mi->bank[i].size);

        /* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
        memblock_reserve(__pa(_sdata), _end - _sdata);
#else
        memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
        if (phys_initrd_size &&
            !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
                phys_initrd_start = phys_initrd_size = 0;
        }
        if (phys_initrd_size &&
            memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
                pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
                       phys_initrd_start, phys_initrd_size);
                phys_initrd_start = phys_initrd_size = 0;
        }
        if (phys_initrd_size) {
                memblock_reserve(phys_initrd_start, phys_initrd_size);

                /* Now convert initrd to virtual addresses */
                initrd_start = __phys_to_virt(phys_initrd_start);
                initrd_end = initrd_start + phys_initrd_size;
        }
#endif

        arm_mm_memblock_reserve();
        arm_dt_memblock_reserve();

        /* reserve any platform specific memblock areas */
        if (mdesc->reserve)
                mdesc->reserve();

        /*
         * reserve memory for DMA contiguous allocations,
         * must come from DMA area inside low memory
         */
        dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));

        memblock_dump_all();
}
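/*
 * Top-level boot-time memory setup: find the PFN limits, initialise bootmem
 * over lowmem, register sparsemem sections, and hand the zone sizes to the
 * core VM.
 */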
void __init bootmem_init(void)
{
        unsigned long min, max_low, max_high;

        max_low = max_high = 0;

        find_limits(&min, &max_low, &max_high);

        arm_bootmem_init(min, max_low);

        /*
         * Sparsemem tries to allocate bootmem in memory_present(),
         * so must be done after the fixed reservations
         */
        arm_memory_present();

        /*
         * sparse_init() needs the bootmem allocator up and running.
         */
        sparse_init();

        /*
         * Now free the memory - free_area_init_node needs
         * the sparse mem_map arrays initialized by sparse_init()
         * for memmap_init_zone(), otherwise all PFNs are invalid.
         */
        arm_bootmem_free(min, max_low, max_high);

        /*
         * This doesn't seem to be used by the Linux memory manager any
         * more, but is used by ll_rw_block.  If we can get rid of it, we
         * also get rid of some of the stuff above as well.
         *
         * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
         * the system, not the maximum PFN.
         */
        max_low_pfn = max_low - PHYS_PFN_OFFSET;
        max_pfn = max_high - PHYS_PFN_OFFSET;
}
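/*
 * Free the pages in [pfn, end) back to the page allocator, return how many
 * were freed and, when a label is given, log the amount in kilobytes.
 */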
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
        unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

        for (; pfn < end; pfn++) {
                struct page *page = pfn_to_page(pfn);
                ClearPageReserved(page);
                init_page_count(page);
                __free_page(page);
                pages++;
        }

        if (size && s)
                printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

        return pages;
}
/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
        u32 *p = (u32 *)s;
        for (; count != 0; count -= 4)
                *p++ = 0xe7fddef0;
}
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        unsigned long pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
        pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                free_bootmem(pg, pgend - pg);
}
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
        unsigned long bank_start, prev_bank_end = 0;
        unsigned int i;

        /*
         * This relies on each bank being in address order.
         * The banks are sorted previously in bootmem_init().
         */
        for_each_bank(i, mi) {
                struct membank *bank = &mi->bank[i];

                bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist
                 * due to SPARSEMEM sections which aren't present.
                 */
                bank_start = min(bank_start,
                                 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
                /*
                 * Align down here since the VM subsystem insists that the
                 * memmap entries are valid from the bank start aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_bank_end && prev_bank_end < bank_start)
                        free_memmap(prev_bank_end, bank_start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
                free_memmap(prev_bank_end,
                            ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}
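/*
 * Release all highmem pages that are not covered by a memblock reservation
 * back to the page allocator and account for them in totalhigh_pages.
 */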
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
        struct memblock_region *mem, *res;

        /* set highmem page free */
        for_each_memblock(memory, mem) {
                unsigned long start = memblock_region_memory_base_pfn(mem);
                unsigned long end = memblock_region_memory_end_pfn(mem);

                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;

                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;

                /* Find and exclude any reserved regions */
                for_each_memblock(reserved, res) {
                        unsigned long res_start, res_end;

                        res_start = memblock_region_reserved_base_pfn(res);
                        res_end = memblock_region_reserved_end_pfn(res);

                        if (res_end < start)
                                continue;
                        if (res_start < start)
                                res_start = start;
                        if (res_start > end)
                                res_start = end;
                        if (res_end > end)
                                res_end = end;
                        if (res_start != start)
                                totalhigh_pages += free_area(start, res_start,
                                                             NULL);
                        start = res_end;
                        if (start == end)
                                break;
                }

                /* And now free anything which remains */
                if (start < end)
                        totalhigh_pages += free_area(start, end, NULL);
        }
        totalram_pages += totalhigh_pages;
#endif
}
/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
        unsigned long reserved_pages, free_pages;
        struct memblock_region *reg;
        int i;
#ifdef CONFIG_HAVE_TCM
        /* These pointers are filled in on TCM detection */
        extern u32 dtcm_end;
        extern u32 itcm_end;
#endif

        max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

        /* this will put all unused low memory onto the freelists */
        free_unused_memmap(&meminfo);

        totalram_pages += free_all_bootmem();

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        totalram_pages += free_area(PHYS_PFN_OFFSET,
                                    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

        free_highpages();
        reserved_pages = free_pages = 0;

        for_each_bank(i, &meminfo) {
                struct membank *bank = &meminfo.bank[i];
                unsigned int pfn1, pfn2;
                struct page *page, *end;

                pfn1 = bank_pfn_start(bank);
                pfn2 = bank_pfn_end(bank);

                page = pfn_to_page(pfn1);
                end  = pfn_to_page(pfn2 - 1) + 1;

                do {
                        if (PageReserved(page))
                                reserved_pages++;
                        else if (!page_count(page))
                                free_pages++;
                        page++;
                } while (page < end);
        }
        /*
         * Since our memory may not be contiguous, calculate the
         * real number of pages we have in this system
         */
        printk(KERN_INFO "Memory:");
        num_physpages = 0;
        for_each_memblock(memory, reg) {
                unsigned long pages = memblock_region_memory_end_pfn(reg) -
                        memblock_region_memory_base_pfn(reg);
                num_physpages += pages;
                printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
        }
        printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

        printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                free_pages << (PAGE_SHIFT-10),
                reserved_pages << (PAGE_SHIFT-10),
                totalhigh_pages << (PAGE_SHIFT-10));
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

        printk(KERN_NOTICE "Virtual kernel memory layout:\n"
                        "    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
                        "    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                        "    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                        "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                        "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
                        "    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
                        "    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                        "      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
                        "       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

                        MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
                                (PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
                        MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
                        MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
                        MLK(FIXADDR_START, FIXADDR_TOP),
                        MLM(VMALLOC_START, VMALLOC_END),
                        MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
                        MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
                                (PAGE_SIZE)),
#endif
                        MLM(MODULES_VADDR, MODULES_END),

                        MLK_ROUNDUP(_text, _etext),
                        MLK_ROUNDUP(__init_begin, __init_end),
                        MLK_ROUNDUP(_sdata, _edata),
                        MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP
        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#ifdef CONFIG_MMU
        BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
        BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
        BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

        if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
                extern int sysctl_overcommit_memory;
                /*
                 * On a machine this small we won't get
                 * anywhere without overcommit, so turn
                 * it on by default.
                 */
                sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
        }
}
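/*
 * Called once boot is complete: poison and free the kernel's __init text and
 * data (and the TCM link area, if any) back to the page allocator.
 */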
void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
        extern char __tcm_start, __tcm_end;

        poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
        totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
                                    __phys_to_pfn(__pa(&__tcm_end)),
                                    "TCM link");
#endif

        poison_init_mem(__init_begin, __init_end - __init_begin);
        if (!machine_is_integrator() && !machine_is_cintegrator())
                totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
                                            __phys_to_pfn(__pa(__init_end)),
                                            "init");
}
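/*
 * The initrd is normally poisoned and freed once it has been unpacked;
 * passing "keepinitrd" on the command line preserves it in memory.
 */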
#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (!keep_initrd) {
                poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
                totalram_pages += free_area(__phys_to_pfn(__pa(start)),
                                            __phys_to_pfn(__pa(end)),
                                            "initrd");
        }
}

static int __init keepinitrd_setup(char *__unused)
{
        keep_initrd = 1;
        return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif