/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

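/*
 * A physical address is representable only if it fits within the CPU's
 * advertised physical address width (boot_cpu_data.x86_phys_bits).
 */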
static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

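/*
 * Translate a kernel virtual address to a physical address.  Addresses in
 * the kernel image mapping (above __START_KERNEL_map) are offset by
 * phys_base; everything else must lie in the direct mapping at PAGE_OFFSET.
 */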
unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map) {
                x -= __START_KERNEL_map;
                VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
                x += phys_base;
        } else {
                VIRTUAL_BUG_ON(x < PAGE_OFFSET);
                x -= PAGE_OFFSET;
                VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM :
                                        !phys_addr_valid(x));
        }
        return x;
}
EXPORT_SYMBOL(__phys_addr);

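/*
 * Return true if @x is a kernel virtual address in either the kernel image
 * mapping or the direct mapping, and is backed by a valid page frame.
 */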
bool __virt_addr_valid(unsigned long x)
{
        if (x >= __START_KERNEL_map) {
                x -= __START_KERNEL_map;
                if (x >= KERNEL_IMAGE_SIZE)
                        return false;
                x += phys_base;
        } else {
                if (x < PAGE_OFFSET)
                        return false;
                x -= PAGE_OFFSET;
                if (system_state == SYSTEM_BOOTING ?
                                x > MAXMEM : !phys_addr_valid(x)) {
                        return false;
                }
        }

        return pfn_valid(x >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#ifdef CONFIG_DEBUG_VIRTUAL
unsigned long __phys_addr(unsigned long x)
{
        /* VMALLOC_* aren't constants; not available at boot time */
        VIRTUAL_BUG_ON(x < PAGE_OFFSET);
        VIRTUAL_BUG_ON(system_state != SYSTEM_BOOTING &&
                is_vmalloc_addr((void *) x));
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif

bool __virt_addr_valid(unsigned long x)
{
        if (x < PAGE_OFFSET)
                return false;
        if (system_state != SYSTEM_BOOTING && is_vmalloc_addr((void *) x))
                return false;
        return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
}
EXPORT_SYMBOL(__virt_addr_valid);

#endif

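/*
 * Return 1 if page frame @pagenr lies in an E820_RAM region, 0 otherwise.
 * The zero page and the legacy BIOS area are never reported as RAM here.
 */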
int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4KB of memory;
         * this is a BIOS-owned area, not kernel RAM, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSes report the PC BIOS
         * area (640KB->1MB) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

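/*
 * Walk [start, end) page by page: return 1 if the whole range is RAM,
 * 0 if none of it is, or -1 as soon as the range turns out to be mixed.
 */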
int pagerange_is_ram(unsigned long start, unsigned long end)
{
        int ram_page = 0, not_rampage = 0;
        unsigned long page_nr;

        for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
             ++page_nr) {
                if (page_is_ram(page_nr))
                        ram_page = 1;
                else
                        not_rampage = 1;

                if (ram_page == not_rampage)
                        return -1;
        }

        return ram_page;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Check if the request spans more than any BAR in the iomem resource
         * tree.
         */
        WARN_ON(iomem_map_sanity_check(phys_addr, size));

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                                (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
                                pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fall back to certain memory types with certain
                 * requested type:
                 * - request is uc-, return cannot be write-back
                 * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_IO_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_IO_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_IO_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL_IO;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        return ret_addr;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
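 *
 * A minimal, illustrative use; the device, BAR number and register offset
 * below are hypothetical, not taken from this file:
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (regs) {
 *              writel(1, regs + 0x10);
 *              iounmap(regs);
 *      }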
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

/**
 * ioremap_wc   -       map memory into CPU space write combined
 * @offset:     bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

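/*
 * ioremap_cache - map memory into CPU space using the write-back (WB)
 * cache attribute.  Must be freed with iounmap.
 */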
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

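/*
 * Map a range for /dev/mem style access, letting the memtype reservation
 * pick the cache attribute (see the comment below).  The reservation is
 * dropped again once the mapping exists; only used internally.
 */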
static void __iomem *ioremap_default(resource_size_t phys_addr,
                                        unsigned long size)
{
        unsigned long flags;
        void *ret;
        int err;

        /*
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
        if (err < 0)
                return NULL;

        ret = (void *) __ioremap_caller(phys_addr, size, flags,
                                        __builtin_return_address(0));

        free_memtype(phys_addr, phys_addr + size);
        return (void __iomem *)ret;
}

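/*
 * ioremap_prot - map memory with a caller-supplied protection value.
 * Only the cache attribute bits (_PAGE_CACHE_MASK) of @prot_val are used.
 * Must be freed with iounmap.
 */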
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                                unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_default(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

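/*
 * Undo xlate_dev_mem_ptr(): RAM pages were reached through __va() and need
 * nothing; anything else was mapped with ioremap_default() and is unmapped
 * again here.
 */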
void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

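/*
 * Return the pmd entry covering @addr, reading the page-table base from
 * CR3 rather than assuming swapper_pg_dir is already in use.
 */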
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

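/*
 * Set up the boot-time ioremap machinery: install bm_pte as the page table
 * backing the FIX_BTMAP fixmap slots.
 */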
void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

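/*
 * After paging_init(), re-establish any boot-time fixmap entries that are
 * still present through set_fixmap(), so they carry over into the final
 * page tables.
 */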
void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                   unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys, pgprot_t prot)
{
        if (after_paging_init)
                __set_fixmap(idx, phys, prot);
        else
                __early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

static void *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

static int __init check_early_ioremap_leak(void)
{
        int count = 0;
        int i;

        for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
                if (prev_map[i])
                        count++;

        if (!count)
                return 0;
        WARN(1, KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
                count);
        printk(KERN_WARNING
                "please boot with early_ioremap_debug and report the dmesg.\n");

        return 1;
}
late_initcall(check_early_ioremap_leak);

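/*
 * Boot-time ioremap: map @size bytes starting at @phys_addr into one of the
 * FIX_BTMAPS fixmap slots with protection @prot.  Only valid while
 * system_state == SYSTEM_BOOTING; undo with early_iounmap().
 */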
static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
{
        unsigned long offset, last_addr;
        unsigned int nrpages;
        enum fixed_addresses idx0, idx;
        int i, slot;

        WARN_ON(system_state != SYSTEM_BOOTING);

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (!prev_map[i]) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) no free slot found\n",
                         phys_addr, size);
                WARN_ON(1);
                return NULL;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, slot);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        prev_size[slot] = size;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr, prot);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        prev_map[slot] = (void *) (offset + fix_to_virt(idx0));
        return prev_map[slot];
}

/* Remap an IO device */
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init *early_memremap(unsigned long phys_addr, unsigned long size)
{
        return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}

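/*
 * Tear down a mapping created by early_ioremap()/early_memremap().
 * @addr and @size must match the original mapping request.
 */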
void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int i, slot;

        slot = -1;
        for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
                if (prev_map[i] == addr) {
                        slot = i;
                        break;
                }
        }

        if (slot < 0) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) not found in any slot\n",
                         addr, size);
                WARN_ON(1);
                return;
        }

        if (prev_size[slot] != size) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
                         addr, size, slot, prev_size[slot]);
                WARN_ON(1);
                return;
        }

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, slot);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
        prev_map[slot] = NULL;
}

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}