arch/powerpc/mm/mem.c
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/lmb.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE 0       /* XXX for now */
#define CPU_FTR_NOEXECUTE       0
#endif

int init_bootmem_done;
int mem_init_done;
unsigned long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}
#endif

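/*
 * page_is_ram() - report whether a pfn lies within real memory.
 *
 * On 32-bit, everything below high_memory is RAM; on 64-bit we scan the
 * lmb memory regions.  Returns 1 for RAM, 0 otherwise.
 */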
int page_is_ram(unsigned long pfn)
{
        unsigned long paddr = (pfn << PAGE_SHIFT);

#ifndef CONFIG_PPC64    /* XXX for now */
        return paddr < __pa(high_memory);
#else
        int i;
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long base;

                base = lmb.memory.region[i].base;

                if ((paddr >= base) &&
                        (paddr < (base + lmb.memory.region[i].size))) {
                        return 1;
                }
        }

        return 0;
#endif
}

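/*
 * phys_mem_access_prot() - choose protection bits for mapping physical
 * memory into user space (e.g. an mmap of /dev/mem).  Non-RAM pages get
 * guarded, non-cached mappings; a platform may override this via ppc_md.
 */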
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
{
        if (ppc_md.phys_mem_access_prot)
                return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

        if (!page_is_ram(pfn))
                vma_prot = __pgprot(pgprot_val(vma_prot)
                                    | _PAGE_GUARDED | _PAGE_NO_CACHE);
        return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
        return hot_add_scn_to_nid(start);
}
#endif

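/*
 * arch_add_memory() - arch hook for memory hotplug: map the new range
 * into the kernel linear mapping, then hand its pages to the core VM
 * with __add_pages().
 */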
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata;
        struct zone *zone;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        pgdata = NODE_DATA(nid);

        start = (unsigned long)__va(start);
        create_section_mapping(start, start + size);

        /* this should work for most non-highmem platforms */
        zone = pgdata->node_zones;

        return __add_pages(zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn, end_pfn;
        int ret;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = start_pfn + (size >> PAGE_SHIFT);
        ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
        if (ret)
                goto out;
        /* Arch-specific calls go here - next patch */
out:
        return ret;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_memory_resource() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in lmb.memory structures.  Walk through the
 * memory regions, find holes and invoke the callback for each contiguous
 * region.
 */
int
walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
                        int (*func)(unsigned long, unsigned long, void *))
{
        struct lmb_property res;
        unsigned long pfn, len;
        u64 end;
        int ret = -1;

        res.base = (u64) start_pfn << PAGE_SHIFT;
        res.size = (u64) nr_pages << PAGE_SHIFT;

        end = res.base + res.size - 1;
        while ((res.base < end) && (lmb_find(&res) >= 0)) {
                pfn = (unsigned long)(res.base >> PAGE_SHIFT);
                len = (unsigned long)(res.size >> PAGE_SHIFT);
                ret = (*func)(pfn, len, arg);
                if (ret)
                        break;
                res.base += (res.size + 1);
                res.size = (end - res.base + 1);
        }
        return ret;
}
EXPORT_SYMBOL_GPL(walk_memory_resource);
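
/*
 * Illustrative use of walk_memory_resource() (a sketch only; the callback
 * and caller below are hypothetical and not part of this file): count the
 * pages actually present in a range, skipping any holes.
 */
#if 0
static int count_present_cb(unsigned long start_pfn, unsigned long nr_pages,
                            void *arg)
{
        unsigned long *total = arg;

        *total += nr_pages;
        return 0;               /* returning non-zero stops the walk */
}

        /* caller side: */
        unsigned long total = 0;
        walk_memory_resource(start_pfn, nr_pages, &total, count_present_cb);
#endif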

void show_mem(void)
{
        unsigned long total = 0, reserved = 0;
        unsigned long shared = 0, cached = 0;
        unsigned long highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;

        printk("Mem-info:\n");
        show_free_areas();
        for_each_online_pgdat(pgdat) {
                unsigned long flags;
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk("%ld pages of RAM\n", total);
#ifdef CONFIG_HIGHMEM
        printk("%ld pages of HIGHMEM\n", highmem);
#endif
        printk("%ld reserved pages\n", reserved);
        printk("%ld pages shared\n", shared);
        printk("%ld pages swap cached\n", cached);
}

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
        unsigned long i;
        unsigned long start, bootmap_pages;
        unsigned long total_pages;
        int boot_mapsize;

        max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
        total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        total_pages = total_lowmem >> PAGE_SHIFT;
        max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

        /*
         * Find an area to use for the bootmem bitmap.  Calculate the size of
         * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
         * Add 1 additional page in case the address isn't page-aligned.
         */
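        /*
         * Worked example (illustrative): with 4K pages, 512MB of lowmem
         * spans 128K pages, so the bitmap needs 128K bits = 16KB, which
         * bootmem_bootmap_pages() rounds up to 4 pages.
         */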
        bootmap_pages = bootmem_bootmap_pages(total_pages);

        start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

        min_low_pfn = MEMORY_START >> PAGE_SHIFT;
        boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

        /* Add active regions with valid PFNs */
        for (i = 0; i < lmb.memory.cnt; i++) {
                unsigned long start_pfn, end_pfn;
                start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
                end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
                add_active_range(0, start_pfn, end_pfn);
        }

        /* Add all physical memory to the bootmem map, mark each area
         * present.
         */
#ifdef CONFIG_HIGHMEM
        free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                unsigned long addr = lmb.reserved.region[i].base +
                                     lmb_size_bytes(&lmb.reserved, i) - 1;
                if (addr < lowmem_end_addr)
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        lmb_size_bytes(&lmb.reserved, i),
                                        BOOTMEM_DEFAULT);
                else if (lmb.reserved.region[i].base < lowmem_end_addr) {
                        unsigned long adjusted_size = lowmem_end_addr -
                                      lmb.reserved.region[i].base;
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        adjusted_size, BOOTMEM_DEFAULT);
                }
        }
#else
        free_bootmem_with_active_regions(0, max_pfn);

        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++)
                reserve_bootmem(lmb.reserved.region[i].base,
                                lmb_size_bytes(&lmb.reserved, i),
                                BOOTMEM_DEFAULT);

#endif
        /* XXX need to clip this if using highmem? */
        sparse_memory_present_with_active_regions(0);

        init_bootmem_done = 1;
}

/* Mark holes between lmb memory regions as nosave, so hibernation skips them. */
static int __init mark_nonram_nosave(void)
{
        unsigned long lmb_next_region_start_pfn,
                      lmb_region_max_pfn;
        int i;

        for (i = 0; i < lmb.memory.cnt - 1; i++) {
                lmb_region_max_pfn =
                        (lmb.memory.region[i].base >> PAGE_SHIFT) +
                        (lmb.memory.region[i].size >> PAGE_SHIFT);
                lmb_next_region_start_pfn =
                        lmb.memory.region[i+1].base >> PAGE_SHIFT;

                if (lmb_region_max_pfn < lmb_next_region_start_pfn)
                        register_nosave_region(lmb_region_max_pfn,
                                               lmb_next_region_start_pfn);
        }

        return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
        unsigned long total_ram = lmb_phys_mem_size();
        phys_addr_t top_of_ram = lmb_end_of_DRAM();
        unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
        unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
        unsigned long end = __fix_to_virt(FIX_HOLE);

        for (; v < end; v += PAGE_SIZE)
                map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
        map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
        pkmap_page_table = virt_to_kpte(PKMAP_BASE);

        kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
        kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

        printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%lx\n",
               (u64)top_of_ram, total_ram);
        printk(KERN_DEBUG "Memory hole size: %ldMB\n",
               (long int)((top_of_ram - total_ram) >> 20));
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
        max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
        max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
        free_area_init_nodes(max_zone_pfns);

        mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

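/*
 * mem_init() - release the bootmem allocator's pages to the buddy
 * allocator, count the reserved pages, free highmem if configured,
 * and print the usual memory banner.
 */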
void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
        int nid;
#endif
        pg_data_t *pgdat;
        unsigned long i;
        struct page *page;
        unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

        num_physpages = lmb.memory.size >> PAGE_SHIFT;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nid) {
                if (NODE_DATA(nid)->node_spanned_pages != 0) {
                        printk("freeing bootmem node %d\n", nid);
                        totalram_pages +=
                                free_all_bootmem_node(NODE_DATA(nid));
                }
        }
#else
        max_mapnr = max_pfn;
        totalram_pages += free_all_bootmem();
#endif
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        if (!pfn_valid(pgdat->node_start_pfn + i))
                                continue;
                        page = pgdat_page_nr(pgdat, i);
                        if (PageReserved(page))
                                reservedpages++;
                }
        }

        codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
        datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
        initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
        bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
        {
                unsigned long pfn, highmem_mapnr;

                highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        struct page *page = pfn_to_page(pfn);
                        if (lmb_is_reserved(pfn << PAGE_SHIFT))
                                continue;
                        ClearPageReserved(page);
                        init_page_count(page);
                        __free_page(page);
                        totalhigh_pages++;
                        reservedpages--;
                }
                totalram_pages += totalhigh_pages;
                printk(KERN_DEBUG "High memory: %luk\n",
                       totalhigh_pages << (PAGE_SHIFT-10));
        }
#endif /* CONFIG_HIGHMEM */

        printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
               "%luk reserved, %luk data, %luk bss, %luk init)\n",
                (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                bsssize >> 10,
                initsize >> 10);

        mem_init_done = 1;
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
        if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                return;
        /* avoid an atomic op if possible */
        if (test_bit(PG_arch_1, &page->flags))
                clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);
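
/*
 * Illustrative call site (a sketch, not code in this file): any path that
 * stores data into a page user space may later execute, e.g. a filesystem
 * filling a page-cache page, calls flush_dcache_page(page); the actual
 * i-cache flush is then deferred to update_mmu_cache() below.
 */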

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_BOOKE
        void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
        __flush_dcache_icache(start);
        kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
        /* On 8xx there is no need to kmap since highmem is not supported */
        __flush_dcache_icache(page_address(page));
#else
        __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);

        /*
         * We shouldn't have to do this, but some versions of glibc
         * require it (ld.so assumes zero-filled pages are icache clean)
         * - Anton
         */
        flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);

        /*
         * We should be able to use the following optimisation, however
         * there are two problems.
         * Firstly a bug in some versions of binutils meant PLT sections
         * were not marked executable.
         * Secondly the first word in the GOT section is blrl, used
         * to establish the GOT address. Until recently the GOT was
         * not marked executable.
         * - Anton
         */
#if 0
        if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
                return;
#endif

        flush_dcache_page(pg);
}

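/*
 * flush_icache_user_range() - make the i-cache coherent after the kernel
 * has written into a user page on the process's behalf (e.g. ptrace
 * inserting a breakpoint instruction).
 */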
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                             unsigned long addr, int len)
{
        unsigned long maddr;

        maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t pte)
{
#ifdef CONFIG_PPC_STD_MMU
        unsigned long access = 0, trap;
#endif
        unsigned long pfn = pte_pfn(pte);

        /* handle i-cache coherency */
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
            !cpu_has_feature(CPU_FTR_NOEXECUTE) &&
            pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);
#ifdef CONFIG_8xx
                /* On 8xx, cache control instructions (particularly
                 * "dcbst" from flush_dcache_icache) fault as a write
                 * operation if there is an unpopulated TLB entry
                 * for the address in question. To work around that,
                 * we invalidate the TLB here, thus avoiding dcbst
                 * misbehaviour.
                 */
                _tlbie(address, 0 /* 8xx doesn't care about PID */);
#endif
                /* The _PAGE_USER test should really be _PAGE_EXEC, but
                 * older glibc versions execute some code from no-exec
                 * pages, which for now we are supporting.  If exec-only
                 * pages are ever implemented, this will have to change.
                 */
                if (!PageReserved(page) && (pte_val(pte) & _PAGE_USER)
                    && !test_bit(PG_arch_1, &page->flags)) {
                        if (vma->vm_mm == current->active_mm) {
                                __flush_dcache_icache((void *) address);
                        } else
                                flush_dcache_icache_page(page);
                        set_bit(PG_arch_1, &page->flags);
                }
        }

#ifdef CONFIG_PPC_STD_MMU
        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(pte) || address >= TASK_SIZE)
                return;

        /* We try to figure out if we are coming from an instruction
         * access fault and pass that down to __hash_page so we avoid
         * double-faulting on execution of fresh text. We have to test
         * for regs NULL since init will get here first thing at boot.
         *
         * We also avoid filling the hash if not coming from a fault.
         */
        if (current->thread.regs == NULL)
                return;
        trap = TRAP(current->thread.regs);
        if (trap == 0x400)
                access |= _PAGE_EXEC;
        else if (trap != 0x300)
                return;
        hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
}