/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995, 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

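/*
 * walk_system_ram_range() callback, invoked only for ranges that intersect
 * a "System RAM" resource: return 1 (so the caller refuses the mapping) if
 * any page in the range is real, unreserved RAM; otherwise warn once and
 * return 0, since reserved pages inside a RAM resource may still be
 * remapped.
 */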
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
                               void *arg)
{
        unsigned long i;

        for (i = 0; i < nr_pages; ++i)
                if (pfn_valid(start_pfn + i) &&
                    !PageReserved(pfn_to_page(start_pfn + i)))
                        return 1;

        WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

        return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long offset, vaddr;
        resource_size_t pfn, last_pfn, last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        pfn      = phys_addr >> PAGE_SHIFT;
        last_pfn = last_addr >> PAGE_SHIFT;
        if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
                                  __ioremap_check_ram) == 1)
                return NULL;

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PHYSICAL_PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                if (!is_new_memtype_allowed(phys_addr, size,
                                            prot_val, new_prot_val)) {
                        printk(KERN_ERR
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        goto err_free_memtype;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_IO_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_IO_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_IO_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL_IO;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                goto err_free_memtype;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;

        if (kernel_map_sync_memtype(phys_addr, size, prot_val))
                goto err_free_area;

        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
                goto err_free_area;

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        /*
         * Check if the request spans more than any BAR in the iomem resource
         * tree.
         */
        WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
                  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

        return ret_addr;
err_free_area:
        free_vm_area(area);
err_free_memtype:
        free_memtype(phys_addr, phys_addr + size);
        return NULL;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
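
/*
 * A minimal usage sketch (illustrative, not part of this file): a
 * hypothetical PCI driver maps BAR 0 uncached and reads a 32-bit status
 * register. MY_STATUS_REG is a made-up offset; error handling is trimmed
 * to the essentials.
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + MY_STATUS_REG);
 *	... use the device ...
 *	iounmap(regs);
 */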

/**
 * ioremap_wc   -       map memory into CPU space write combined
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
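
/*
 * Sketch of a typical write-combined mapping (illustrative): a frame
 * buffer driver maps its aperture WC so streaming pixel writes can be
 * batched by the CPU. The fb_info fields are standard; the surrounding
 * driver is assumed.
 *
 *	info->screen_base = ioremap_wc(info->fix.smem_start,
 *				       info->fix.smem_len);
 */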

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
        return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

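/* Virtual address of each FIX_BTMAPS slot, pre-computed at init time. */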
static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
        pmd_t *pmd;
        int i;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_reset(void)
{
        after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           phys_addr_t phys, pgprot_t prot)
{
        if (after_paging_init)
                __set_fixmap(idx, phys, prot);
        else
                __early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

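/*
 * Per-slot book-keeping: prev_map[] holds the __iomem cookie handed out
 * for each live early mapping (NULL when the slot is free), prev_size[]
 * the size it was mapped with, so early_iounmap() can sanity-check its
 * arguments.
 */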
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

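/*
 * Called if the fixmap area is relocated during early boot (e.g. via the
 * reservetop= option): rebuild the boot-time page table for the new
 * location. No early mapping may still be live across the move, hence
 * the WARN_ON below.
 */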
void __init fixup_early_ioremap(void)
{
        int i;

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (prev_map[i]) {
                        WARN_ON(1);
                        break;
                }
        }

        early_ioremap_init();
}

static int __init check_early_ioremap_leak(void)
{
        int count = 0;
        int i;

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                if (prev_map[i])
                        count++;

        if (!count)
                return 0;
        WARN(1, KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
                count);
        printk(KERN_WARNING
                "please boot with early_ioremap_debug and report the dmesg.\n");

        return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
        unsigned long offset;
        resource_size_t last_addr;
        unsigned int nrpages;
        enum fixed_addresses idx0, idx;
        int i, slot;

        WARN_ON(system_state != SYSTEM_BOOTING);

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (!prev_map[i]) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_ioremap(%08llx, %08lx): no free slot\n",
                       (u64)phys_addr, size);
                WARN_ON(1);
                return NULL;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
                       (u64)phys_addr, size, slot);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        prev_size[slot] = size;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr, prot);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

        prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
        return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
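
/*
 * A minimal boot-time usage sketch (illustrative, not part of this file):
 * peek at a firmware table before the regular ioremap() machinery is up.
 * table_phys and table_len are assumed to come from a firmware pointer
 * discovered earlier in boot.
 *
 *	void __iomem *p;
 *
 *	p = early_ioremap(table_phys, table_len);
 *	... parse the table ...
 *	early_iounmap(p, table_len);
 */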

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int i, slot;

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (prev_map[i] == addr) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_iounmap(%p, %08lx): no matching slot\n",
                       addr, size);
                WARN_ON(1);
                return;
        }

        if (prev_size[slot] != size) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]: size mismatch, mapped with %08lx\n",
                       addr, size, slot, prev_size[slot]);
                WARN_ON(1);
                return;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, slot);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
        prev_map[slot] = NULL;
}