/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/nodemask.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/setup.h>
#include <asm/sections.h>

#define __page_aligned  __attribute__((section(".data.page_aligned")))

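/* Per-CPU state used by the generic TLB-gather machinery (asm-generic/tlb.h). */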
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

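/*
 * The kernel page global directory. It must be page aligned since its
 * base address is handed straight to the PTBR system register in
 * paging_init() below.
 */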
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned;

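/*
 * Page full of zeroes, set up in paging_init() and used by the mm core
 * wherever a zeroed page is needed. Exported since other code may
 * reference it as well.
 */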
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Cache of MMU context last used.
 */
unsigned long mmu_context_cache = NO_CONTEXT;

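/*
 * Dump a summary of memory usage: walk each online node's mem_map and
 * classify every page as reserved, swap-cached, slab, free or shared.
 */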
void show_mem(void)
{
        int total = 0, reserved = 0, cached = 0;
        int slab = 0, free = 0, shared = 0;
        pg_data_t *pgdat;

        printk("Mem-info:\n");
        show_free_areas();

        for_each_online_pgdat(pgdat) {
                struct page *page, *end;

                page = pgdat->node_mem_map;
                end = page + pgdat->node_spanned_pages;

                do {
                        total++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (PageSlab(page))
                                slab++;
                        else if (!page_count(page))
                                free++;
                        else
                                shared += page_count(page) - 1;
                        page++;
                } while (page < end);
        }

        printk("%d pages of RAM\n", total);
        printk("%d free pages\n", free);
        printk("%d reserved pages\n", reserved);
        printk("%d slab pages\n", slab);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

/*
 * paging_init() sets up the page tables
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        extern unsigned long _evba;
        void *zero_page;
        int nid;

        /*
         * Make sure we can handle exceptions before enabling
         * paging. Not that we should ever _get_ any exceptions this
         * early, but you never know...
         */
        printk("Exception vectors start at %p\n", &_evba);
        sysreg_write(EVBA, (unsigned long)&_evba);

        /*
         * Since we are ready to handle exceptions now, we should let
         * the CPU generate them...
         */
        __asm__ __volatile__ ("csrf %0" : : "i"(SR_EM_BIT));

        /*
         * Allocate the zero page. The allocator will panic if it
         * can't satisfy the request, so no need to check.
         */
        zero_page = alloc_bootmem_low_pages_node(NODE_DATA(0),
                                                 PAGE_SIZE);

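        /*
         * Hand the kernel page global directory to the hardware and
         * switch on address translation.
         */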
        sysreg_write(PTBR, (unsigned long)swapper_pg_dir);
        enable_mmu();
        printk("CPU: Paging enabled\n");

        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long zones_size[MAX_NR_ZONES];
                unsigned long low, start_pfn;

                start_pfn = pgdat->bdata->node_min_pfn;
                low = pgdat->bdata->node_low_pfn;

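                /*
                 * All of the node's memory goes into ZONE_NORMAL; this
                 * port sets up neither a DMA nor a highmem zone.
                 */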
                memset(zones_size, 0, sizeof(zones_size));
                zones_size[ZONE_NORMAL] = low - start_pfn;

                printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
                       nid, start_pfn, low);

                free_area_init_node(nid, zones_size, start_pfn, NULL);

                printk("Node %u: mem_map starts at %p\n",
                       pgdat->node_id, pgdat->node_mem_map);
        }

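        /* Generic code expects mem_map to point at node 0's page array. */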
        mem_map = NODE_DATA(0)->node_mem_map;

        memset(zero_page, 0, PAGE_SIZE);
        empty_zero_page = virt_to_page(zero_page);
        flush_dcache_page(empty_zero_page);
}

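/*
 * mem_init() hands every node's bootmem pages over to the page
 * allocator, records the high-memory boundary and prints the
 * traditional memory banner.
 */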
void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int nid, i;

        reservedpages = 0;
        high_memory = NULL;

        /* this will put all low memory onto the freelists */
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long node_pages = 0;
                void *node_high_memory;

                num_physpages += pgdat->node_present_pages;

                if (pgdat->node_spanned_pages != 0)
                        node_pages = free_all_bootmem_node(pgdat);

                totalram_pages += node_pages;

                for (i = 0; i < node_pages; i++)
                        if (PageReserved(pgdat->node_mem_map + i))
                                reservedpages++;

                node_high_memory = (void *)((pgdat->node_start_pfn
                                             + pgdat->node_spanned_pages)
                                            << PAGE_SHIFT);
                if (node_high_memory > high_memory)
                        high_memory = node_high_memory;
        }

        max_mapnr = MAP_NR(high_memory);

        codesize = (unsigned long)_etext - (unsigned long)_text;
        datasize = (unsigned long)_edata - (unsigned long)_data;
        initsize = (unsigned long)__init_end - (unsigned long)__init_begin;

        printk("Memory: %luk/%luk available (%dk kernel code, "
               "%dk reserved, %dk data, %dk init)\n",
               (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
               totalram_pages << (PAGE_SHIFT - 10),
               codesize >> 10,
               reservedpages << (PAGE_SHIFT - 10),
               datasize >> 10,
               initsize >> 10);
}

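/*
 * Return a reserved range of memory to the page allocator, page by
 * page, and report how much was freed.
 */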
static inline void free_area(unsigned long addr, unsigned long end, char *s)
{
        unsigned int size = (end - addr) >> 10;

        for (; addr < end; addr += PAGE_SIZE) {
                struct page *page = virt_to_page(addr);
                ClearPageReserved(page);
                init_page_count(page);
                free_page(addr);
                totalram_pages++;
        }

        if (size && s)
                printk(KERN_INFO "Freeing %s memory: %dK (%lx - %lx)\n",
                       s, size, end - (size << 10), end);
}

void free_initmem(void)
{
        free_area((unsigned long)__init_begin, (unsigned long)__init_end,
                  "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

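/*
 * Called by generic code once the initrd image is no longer needed.
 */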
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_area(start, end, "initrd");
}

#endif