Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6
[pandora-kernel.git] / arch / x86 / mm / ioremap.c
1 /*
2  * Re-map IO memory to kernel address space so that we can access it.
3  * This is needed for high PCI addresses that aren't mapped in the
4  * 640k-1MB IO memory area on PC's
5  *
6  * (C) Copyright 1995 1996 Linus Torvalds
7  */
8
9 #include <linux/bootmem.h>
10 #include <linux/init.h>
11 #include <linux/io.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mmiotrace.h>
16
17 #include <asm/cacheflush.h>
18 #include <asm/e820.h>
19 #include <asm/fixmap.h>
20 #include <asm/pgtable.h>
21 #include <asm/tlbflush.h>
22 #include <asm/pgalloc.h>
23 #include <asm/pat.h>
24
25 static inline int phys_addr_valid(resource_size_t addr)
26 {
27 #ifdef CONFIG_PHYS_ADDR_T_64BIT
28         return !(addr >> boot_cpu_data.x86_phys_bits);
29 #else
30         return 1;
31 #endif
32 }
33
34 #ifdef CONFIG_X86_64
35
36 unsigned long __phys_addr(unsigned long x)
37 {
38         if (x >= __START_KERNEL_map) {
39                 x -= __START_KERNEL_map;
40                 VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
41                 x += phys_base;
42         } else {
43                 VIRTUAL_BUG_ON(x < PAGE_OFFSET);
44                 x -= PAGE_OFFSET;
45                 VIRTUAL_BUG_ON(!phys_addr_valid(x));
46         }
47         return x;
48 }
49 EXPORT_SYMBOL(__phys_addr);
50
51 bool __virt_addr_valid(unsigned long x)
52 {
53         if (x >= __START_KERNEL_map) {
54                 x -= __START_KERNEL_map;
55                 if (x >= KERNEL_IMAGE_SIZE)
56                         return false;
57                 x += phys_base;
58         } else {
59                 if (x < PAGE_OFFSET)
60                         return false;
61                 x -= PAGE_OFFSET;
62                 if (!phys_addr_valid(x))
63                         return false;
64         }
65
66         return pfn_valid(x >> PAGE_SHIFT);
67 }
68 EXPORT_SYMBOL(__virt_addr_valid);
69
70 #else
71
#ifdef CONFIG_DEBUG_VIRTUAL
/*
 * 32-bit debug variant: translate a direct-mapped kernel virtual address
 * to physical, asserting it is not below PAGE_OFFSET and not in the
 * vmalloc area (vmalloc addresses have no linear phys relationship).
 */
unsigned long __phys_addr(unsigned long x)
{
	/* VMALLOC_* aren't constants  */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET);
	VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#endif
82
83 bool __virt_addr_valid(unsigned long x)
84 {
85         if (x < PAGE_OFFSET)
86                 return false;
87         if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
88                 return false;
89         if (x >= FIXADDR_START)
90                 return false;
91         return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
92 }
93 EXPORT_SYMBOL(__virt_addr_valid);
94
95 #endif
96
97 int page_is_ram(unsigned long pagenr)
98 {
99         resource_size_t addr, end;
100         int i;
101
102         /*
103          * A special case is the first 4Kb of memory;
104          * This is a BIOS owned area, not kernel ram, but generally
105          * not listed as such in the E820 table.
106          */
107         if (pagenr == 0)
108                 return 0;
109
110         /*
111          * Second special case: Some BIOSen report the PC BIOS
112          * area (640->1Mb) as ram even though it is not.
113          */
114         if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
115                     pagenr < (BIOS_END >> PAGE_SHIFT))
116                 return 0;
117
118         for (i = 0; i < e820.nr_map; i++) {
119                 /*
120                  * Not usable memory:
121                  */
122                 if (e820.map[i].type != E820_RAM)
123                         continue;
124                 addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
125                 end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
126
127
128                 if ((pagenr >= addr) && (pagenr < end))
129                         return 1;
130         }
131         return 0;
132 }
133
134 /*
135  * Fix up the linear direct mapping of the kernel to avoid cache attribute
136  * conflicts.
137  */
138 int ioremap_change_attr(unsigned long vaddr, unsigned long size,
139                                unsigned long prot_val)
140 {
141         unsigned long nrpages = size >> PAGE_SHIFT;
142         int err;
143
144         switch (prot_val) {
145         case _PAGE_CACHE_UC:
146         default:
147                 err = _set_memory_uc(vaddr, nrpages);
148                 break;
149         case _PAGE_CACHE_WC:
150                 err = _set_memory_wc(vaddr, nrpages);
151                 break;
152         case _PAGE_CACHE_WB:
153                 err = _set_memory_wb(vaddr, nrpages);
154                 break;
155         }
156
157         return err;
158 }
159
160 /*
161  * Remap an arbitrary physical address space into the kernel virtual
162  * address space. Needed when the kernel wants to access high addresses
163  * directly.
164  *
165  * NOTE! We need to allow non-page-aligned mappings too: we will obviously
166  * have to convert them into an offset in a page-aligned mapping, but the
167  * caller shouldn't need to know that small detail.
168  */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	/* Keep the caller's exact (unaligned) request for mmiotrace below. */
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Reject addresses wider than the CPU's physical address bits. */
	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 * (pages that are RAM, valid and not marked Reserved refuse to map)
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Register the caching attribute with PAT; the range may come back
	 * with a different (already established) type in new_prot_val.
	 */
	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fallback to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			/* Give back the PAT reservation before failing. */
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		/* Any other mismatch: adopt the established type. */
		prot_val = new_prot_val;
	}

	/* Pick the page protection matching the (possibly adjusted) type. */
	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	/* Keep the direct mapping's attributes consistent with this one. */
	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	/* Return the caller's original sub-page offset within the mapping. */
	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}
300
301 /**
302  * ioremap_nocache     -   map bus memory into CPU space
303  * @offset:    bus address of the memory
304  * @size:      size of the resource to map
305  *
306  * ioremap_nocache performs a platform specific sequence of operations to
307  * make bus memory CPU accessible via the readb/readw/readl/writeb/
308  * writew/writel functions and the other mmio helpers. The returned
309  * address is not guaranteed to be usable directly as a virtual
310  * address.
311  *
312  * This version of ioremap ensures that the memory is marked uncachable
313  * on the CPU as well as honouring existing caching rules from things like
314  * the PCI bus. Note that there are other caches and buffers on many
315  * busses. In particular driver authors should read up on PCI writes
316  *
317  * It's useful if some control registers are in such an area and
318  * write combining or read caching is not desirable:
319  *
320  * Must be freed with iounmap.
321  */
322 void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
323 {
324         /*
325          * Ideally, this should be:
326          *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
327          *
328          * Till we fix all X drivers to use ioremap_wc(), we will use
329          * UC MINUS.
330          */
331         unsigned long val = _PAGE_CACHE_UC_MINUS;
332
333         return __ioremap_caller(phys_addr, size, val,
334                                 __builtin_return_address(0));
335 }
336 EXPORT_SYMBOL(ioremap_nocache);
337
338 /**
339  * ioremap_wc   -       map memory into CPU space write combined
340  * @offset:     bus address of the memory
341  * @size:       size of the resource to map
342  *
343  * This version of ioremap ensures that the memory is marked write combining.
344  * Write combining allows faster writes to some hardware devices.
345  *
346  * Must be freed with iounmap.
347  */
348 void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
349 {
350         if (pat_enabled)
351                 return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
352                                         __builtin_return_address(0));
353         else
354                 return ioremap_nocache(phys_addr, size);
355 }
356 EXPORT_SYMBOL(ioremap_wc);
357
358 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
359 {
360         return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
361                                 __builtin_return_address(0));
362 }
363 EXPORT_SYMBOL(ioremap_cache);
364
/*
 * Map a range using whatever memory type PAT already established for it:
 * used by xlate_dev_mem_ptr() where the caller has no preference.
 */
static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * Ask PAT to pick the type (-1 = no preference):
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	/* __ioremap_caller took its own reservation; drop this probe one. */
	free_memtype(phys_addr, phys_addr + size);
	return ret;
}
387
388 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
389                                 unsigned long prot_val)
390 {
391         return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
392                                 __builtin_return_address(0));
393 }
394 EXPORT_SYMBOL(ioremap_prot);
395
396 /**
397  * iounmap - Free a IO remapping
398  * @addr: virtual address from ioremap_*
399  *
400  * Caller must ensure there is only one unmapping for the same pointer.
401  */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	/* Addresses at or below high_memory were never ioremapped. */
	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space.   So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	/* Strip the sub-page offset __ioremap_caller added to the vaddr. */
	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Drop the PAT reservation taken in __ioremap_caller(). */
	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
449
450 /*
451  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
452  * access
453  */
454 void *xlate_dev_mem_ptr(unsigned long phys)
455 {
456         void *addr;
457         unsigned long start = phys & PAGE_MASK;
458
459         /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
460         if (page_is_ram(start >> PAGE_SHIFT))
461                 return __va(phys);
462
463         addr = (void __force *)ioremap_default(start, PAGE_SIZE);
464         if (addr)
465                 addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
466
467         return addr;
468 }
469
470 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
471 {
472         if (page_is_ram(phys >> PAGE_SHIFT))
473                 return;
474
475         iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
476         return;
477 }
478
479 static int __initdata early_ioremap_debug;
480
/* Boot-parameter handler: turn on early_ioremap debug output. */
static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
488
/* Set once real paging is up; flips the early fixmap helpers to the real ones. */
static __initdata int after_paging_init;
/* Statically allocated PTE page backing the boot-time fixmap (FIX_BTMAP) range. */
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
491
492 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
493 {
494         /* Don't assume we're using swapper_pg_dir at this point */
495         pgd_t *base = __va(read_cr3());
496         pgd_t *pgd = &base[pgd_index(addr)];
497         pud_t *pud = pud_offset(pgd, addr);
498         pmd_t *pmd = pmd_offset(pud, addr);
499
500         return pmd;
501 }
502
503 static inline pte_t * __init early_ioremap_pte(unsigned long addr)
504 {
505         return &bm_pte[pte_index(addr)];
506 }
507
508 static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
509
/*
 * Set up the boot-time ioremap machinery: precompute the slot base
 * addresses and install bm_pte as the page table for the FIX_BTMAP
 * virtual range.
 */
void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	/* Cache the virtual base of each slot for __early_ioremap(). */
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	/* Hook bm_pte in as the PTE page covering the fixmap btmap range. */
	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
543
/* Called once the real kernel page tables are live; switches the
 * early fixmap helpers over to the normal __set_fixmap()/clear_fixmap(). */
void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}
548
/*
 * Install (or, when @flags is zero, clear) a single boot-time fixmap PTE
 * in bm_pte and flush the TLB entry for that virtual address.
 */
static void __init __early_set_fixmap(enum fixed_addresses idx,
				   unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Zero pgprot means "unmap": clear the PTE instead of setting it. */
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
567
568 static inline void __init early_set_fixmap(enum fixed_addresses idx,
569                                            unsigned long phys, pgprot_t prot)
570 {
571         if (after_paging_init)
572                 __set_fixmap(idx, phys, prot);
573         else
574                 __early_set_fixmap(idx, phys, prot);
575 }
576
577 static inline void __init early_clear_fixmap(enum fixed_addresses idx)
578 {
579         if (after_paging_init)
580                 clear_fixmap(idx);
581         else
582                 __early_set_fixmap(idx, 0, __pgprot(0));
583 }
584
/* Per-slot bookkeeping for outstanding early_ioremap() mappings:
 * the returned address and the size it was requested with. */
static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
587
588 static int __init check_early_ioremap_leak(void)
589 {
590         int count = 0;
591         int i;
592
593         for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
594                 if (prev_map[i])
595                         count++;
596
597         if (!count)
598                 return 0;
599         WARN(1, KERN_WARNING
600                "Debug warning: early ioremap leak of %d areas detected.\n",
601                 count);
602         printk(KERN_WARNING
603                 "please boot with early_ioremap_debug and report the dmesg.\n");
604
605         return 1;
606 }
607 late_initcall(check_early_ioremap_leak);
608
609 static void __init __iomem *
610 __early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
611 {
612         unsigned long offset, last_addr;
613         unsigned int nrpages;
614         enum fixed_addresses idx0, idx;
615         int i, slot;
616
617         WARN_ON(system_state != SYSTEM_BOOTING);
618
619         slot = -1;
620         for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
621                 if (!prev_map[i]) {
622                         slot = i;
623                         break;
624                 }
625         }
626
627         if (slot < 0) {
628                 printk(KERN_INFO "early_iomap(%08lx, %08lx) not found slot\n",
629                          phys_addr, size);
630                 WARN_ON(1);
631                 return NULL;
632         }
633
634         if (early_ioremap_debug) {
635                 printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
636                        phys_addr, size, slot);
637                 dump_stack();
638         }
639
640         /* Don't allow wraparound or zero size */
641         last_addr = phys_addr + size - 1;
642         if (!size || last_addr < phys_addr) {
643                 WARN_ON(1);
644                 return NULL;
645         }
646
647         prev_size[slot] = size;
648         /*
649          * Mappings have to be page-aligned
650          */
651         offset = phys_addr & ~PAGE_MASK;
652         phys_addr &= PAGE_MASK;
653         size = PAGE_ALIGN(last_addr + 1) - phys_addr;
654
655         /*
656          * Mappings have to fit in the FIX_BTMAP area.
657          */
658         nrpages = size >> PAGE_SHIFT;
659         if (nrpages > NR_FIX_BTMAPS) {
660                 WARN_ON(1);
661                 return NULL;
662         }
663
664         /*
665          * Ok, go for it..
666          */
667         idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
668         idx = idx0;
669         while (nrpages > 0) {
670                 early_set_fixmap(idx, phys_addr, prot);
671                 phys_addr += PAGE_SIZE;
672                 --idx;
673                 --nrpages;
674         }
675         if (early_ioremap_debug)
676                 printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);
677
678         prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
679         return prev_map[slot];
680 }
681
/* Remap an IO device during boot; pages are mapped with PAGE_KERNEL_IO. */
void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}
687
/* Remap memory during boot; pages are mapped with ordinary PAGE_KERNEL. */
void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
693
/*
 * Release a boot-time mapping created by __early_ioremap(). The @addr
 * and @size must match the original request (prev_map/prev_size check).
 */
void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	/* Find which slot handed out this address. */
	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	/* The unmap size must match the size used at map time. */
	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	/* Sanity: the address must lie inside the boot-time fixmap range. */
	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	/* Clear the slot's PTEs; fixmap indices grow downwards. */
	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}