[pandora-kernel.git] arch/avr32/mm/init.c
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/nodemask.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/setup.h>
#include <asm/sections.h>

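/* Per-CPU state used by the generic TLB gather (unmap batching) code. */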
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

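/* Kernel page global directory; loaded into PTBR by paging_init(). */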
pgd_t swapper_pg_dir[PTRS_PER_PGD];

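/* The page of zeroes, allocated and cleared at the end of paging_init(). */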
struct page *empty_zero_page;

/*
 * Cache of MMU context last used.
 */
unsigned long mmu_context_cache = NO_CONTEXT;

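/*
 * Print a summary of memory usage: walk every online node's mem_map and
 * count pages that are reserved, in the swap cache, used by the slab
 * allocator, free, or shared (referenced more than once).
 */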
void show_mem(void)
{
        int total = 0, reserved = 0, cached = 0;
        int slab = 0, free = 0, shared = 0;
        pg_data_t *pgdat;

        printk("Mem-info:\n");
        show_free_areas();

        for_each_online_pgdat(pgdat) {
                struct page *page, *end;

                page = pgdat->node_mem_map;
                end = page + pgdat->node_spanned_pages;

                do {
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (PageSlab(page))
                                slab++;
                        else if (!page_count(page))
                                free++;
                        else
                                shared += page_count(page) - 1;
                        page++;
                } while (page < end);
        }

        printk ("%d pages of RAM\n", total);
        printk ("%d free pages\n", free);
        printk ("%d reserved pages\n", reserved);
        printk ("%d slab pages\n", slab);
        printk ("%d pages shared\n", shared);
        printk ("%d pages swap cached\n", cached);
}

/*
 * paging_init() sets up the page tables
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        extern unsigned long _evba;
        void *zero_page;
        int nid;

        /*
         * Make sure we can handle exceptions before enabling
         * paging. Not that we should ever _get_ any exceptions this
         * early, but you never know...
         */
        printk("Exception vectors start at %p\n", &_evba);
        sysreg_write(EVBA, (unsigned long)&_evba);

        /*
         * Since we are ready to handle exceptions now, we should let
         * the CPU generate them...
         */
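        /* "csrf" clears a status register flag; clearing EM unmasks exceptions */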
        __asm__ __volatile__ ("csrf %0" : : "i"(SR_EM_BIT));

        /*
         * Allocate the zero page. The allocator will panic if it
         * can't satisfy the request, so no need to check.
         */
        zero_page = alloc_bootmem_low_pages_node(NODE_DATA(0),
                                                 PAGE_SIZE);

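        /*
         * Load PTBR with the kernel page directory, clear every entry
         * in it and switch the MMU on.
         */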
        {
                pgd_t *pg_dir;
                int i;

                pg_dir = swapper_pg_dir;
                sysreg_write(PTBR, (unsigned long)pg_dir);

                for (i = 0; i < PTRS_PER_PGD; i++)
                        pgd_val(pg_dir[i]) = 0;

                enable_mmu();
                printk ("CPU: Paging enabled\n");
        }

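        /*
         * Each node gets a single ZONE_NORMAL zone, spanning from the
         * node's first page frame up to the low-memory limit recorded
         * by the boot memory allocator.
         */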
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long zones_size[MAX_NR_ZONES];
                unsigned long low, start_pfn;

                start_pfn = pgdat->bdata->node_boot_start;
                start_pfn >>= PAGE_SHIFT;
                low = pgdat->bdata->node_low_pfn;

                memset(zones_size, 0, sizeof(zones_size));
                zones_size[ZONE_NORMAL] = low - start_pfn;

                printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
                       nid, start_pfn, low);

                free_area_init_node(nid, pgdat, zones_size, start_pfn, NULL);

                printk("Node %u: mem_map starts at %p\n",
                       pgdat->node_id, pgdat->node_mem_map);
        }

        mem_map = NODE_DATA(0)->node_mem_map;

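        /*
         * Clear the zero page and write it back from the data cache so
         * its contents are visible through any mapping.
         */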
        memset(zero_page, 0, PAGE_SIZE);
        empty_zero_page = virt_to_page(zero_page);
        flush_dcache_page(empty_zero_page);
}

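/*
 * mem_init() hands the boot memory allocator's free pages over to the
 * page allocator, counts reserved pages, sets up high_memory and
 * max_mapnr, and prints the memory banner.
 */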
void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int nid, i;

        reservedpages = 0;
        high_memory = NULL;

        /* this will put all low memory onto the freelists */
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long node_pages = 0;
                void *node_high_memory;

                num_physpages += pgdat->node_present_pages;

                if (pgdat->node_spanned_pages != 0)
                        node_pages = free_all_bootmem_node(pgdat);

                totalram_pages += node_pages;

                for (i = 0; i < node_pages; i++)
                        if (PageReserved(pgdat->node_mem_map + i))
                                reservedpages++;

                node_high_memory = (void *)((pgdat->node_start_pfn
                                             + pgdat->node_spanned_pages)
                                            << PAGE_SHIFT);
                if (node_high_memory > high_memory)
                        high_memory = node_high_memory;
        }

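        /* max_mapnr is one more than the highest valid mem_map index */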
        max_mapnr = MAP_NR(high_memory);

        codesize = (unsigned long)_etext - (unsigned long)_text;
        datasize = (unsigned long)_edata - (unsigned long)_data;
        initsize = (unsigned long)__init_end - (unsigned long)__init_begin;

        printk ("Memory: %luk/%luk available (%dk kernel code, "
                "%dk reserved, %dk data, %dk init)\n",
                (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
                totalram_pages << (PAGE_SHIFT - 10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT - 10),
                datasize >> 10,
                initsize >> 10);
}

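/*
 * Return the pages in [addr, end) to the page allocator: clear the
 * reserved bit and reset the reference count on each page so that
 * free_page() actually frees it, then report how much was released.
 */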
static inline void free_area(unsigned long addr, unsigned long end, char *s)
{
        unsigned int size = (end - addr) >> 10;

        for (; addr < end; addr += PAGE_SIZE) {
                struct page *page = virt_to_page(addr);
                ClearPageReserved(page);
                init_page_count(page);
                free_page(addr);
                totalram_pages++;
        }

        if (size && s)
                printk(KERN_INFO "Freeing %s memory: %dK (%lx - %lx)\n",
                       s, size, end - (size << 10), end);
}

void free_initmem(void)
{
        free_area((unsigned long)__init_begin, (unsigned long)__init_end,
                  "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

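/* Called by the initrd code once the initial ramdisk is no longer needed. */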
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_area(start, end, "initrd");
}

#endif