Merge tag 'r8169-20060920-00' of git://electric-eye.fr.zoreil.com/home/romieu/linux...
 arch/arm/mm/ioremap.c
index c1f7180..88a999d 100644
 
 #include <asm/cacheflush.h>
 #include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
+#include <asm/sizes.h>
+
+/*
+ * Used by the ioremap() and iounmap() code to mark (super)section-mapped
+ * I/O regions in the vm_struct->flags field.  The top bit is used so
+ * that it cannot collide with the generic VM_* flags.
+ */
+#define VM_ARM_SECTION_MAPPING 0x80000000
 
 static inline void
 remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
@@ -113,10 +122,168 @@ remap_area_pages(unsigned long start, unsigned long pfn,
                dir++;
        } while (address && (address < end));
 
-       flush_cache_vmap(start, end);
        return err;
 }
 
+
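+/*
+ * ioremap/iounmap changes to the vmalloc part of the kernel page tables
+ * bump init_mm.context.kvm_seq; other mm's catch up lazily.  This copies
+ * the vmalloc-region pgd entries from init_mm into @mm, retrying until
+ * the sequence number stops changing under the copy.
+ */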
+void __check_kvm_seq(struct mm_struct *mm)
+{
+       unsigned int seq;
+
+       do {
+               seq = init_mm.context.kvm_seq;
+               memcpy(pgd_offset(mm, VMALLOC_START),
+                      pgd_offset_k(VMALLOC_START),
+                      sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
+                                       pgd_index(VMALLOC_START)));
+               mm->context.kvm_seq = seq;
+       } while (seq != init_mm.context.kvm_seq);
+}
+
+#ifndef CONFIG_SMP
+/*
+ * Section support is unsafe on SMP - if you iounmap and ioremap a region,
+ * the other CPUs will not see this change until their next context switch.
+ * Meanwhile, if an interrupt comes in on one of those other CPUs and
+ * references the new ioremap'd region, that CPU will still be using the
+ * _old_ mapping.
+ *
+ * Note that get_vm_area() allocates a 4K guard page, so we need to mask
+ * the size down to a multiple of 1MB, or we will overflow in the loop
+ * below.
+ */
+static void unmap_area_sections(unsigned long virt, unsigned long size)
+{
+       unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
+       pgd_t *pgd;
+
+       flush_cache_vunmap(addr, end);
+       pgd = pgd_offset_k(addr);
+       do {
+               pmd_t pmd, *pmdp = pmd_offset(pgd, addr);
+
+               pmd = *pmdp;
+               if (!pmd_none(pmd)) {
+                       /*
+                        * Clear the PMD from the page table, and
+                        * increment the kvm sequence so others
+                        * notice this change.
+                        *
+                        * Note: this is still racy on SMP machines.
+                        */
+                       pmd_clear(pmdp);
+                       init_mm.context.kvm_seq++;
+
+                       /*
+                        * Free the page table, if there was one.
+                        */
+                       if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
+                               pte_free_kernel(pmd_page_kernel(pmd));
+               }
+
+               addr += PGDIR_SIZE;
+               pgd++;
+       } while (addr < end);
+
+       /*
+        * Ensure that the active_mm is up to date - we want to
+        * catch any use-after-iounmap cases.
+        */
+       if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
+               __check_kvm_seq(current->active_mm);
+
+       flush_tlb_kernel_range(virt, end);
+}
+
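+/*
+ * On ARM, each pgd entry covers 2MB and is implemented as a pair of 1MB
+ * hardware section descriptors, so the loop below fills pmd[0] and pmd[1]
+ * and then advances by PGDIR_SIZE (2MB).  As a sketch (hypothetical
+ * addresses), mapping 2MB of I/O space at physical 0x50000000 ends up
+ * writing:
+ *
+ *     pmd[0] = __pmd(0x50000000 | prot);
+ *     pmd[1] = __pmd(0x50100000 | prot);
+ */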
+static int
+remap_area_sections(unsigned long virt, unsigned long pfn,
+                   unsigned long size, unsigned long flags)
+{
+       unsigned long prot, addr = virt, end = virt + size;
+       pgd_t *pgd;
+
+       /*
+        * Remove and free any PTE-based mapping, and
+        * sync the current kernel mapping.
+        */
+       unmap_area_sections(virt, size);
+
+       prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
+              (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));
+
+       /*
+        * ARMv6 and above need XN set to prevent speculative prefetches
+        * hitting IO.
+        */
+       if (cpu_architecture() >= CPU_ARCH_ARMv6)
+               prot |= PMD_SECT_XN;
+
+       pgd = pgd_offset_k(addr);
+       do {
+               pmd_t *pmd = pmd_offset(pgd, addr);
+
+               pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
+               pfn += SZ_1M >> PAGE_SHIFT;
+               pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
+               pfn += SZ_1M >> PAGE_SHIFT;
+               flush_pmd_entry(pmd);
+
+               addr += PGDIR_SIZE;
+               pgd++;
+       } while (addr < end);
+
+       return 0;
+}
+
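+/*
+ * Supersections map 16MB at a time.  The architecture requires the
+ * descriptor to be replicated across all 16 consecutive first-level
+ * entries, i.e. pmd[0] and pmd[1] of 8 consecutive pgd slots, which is
+ * what the inner loop below does.  Descriptor bits [23:20] carry bits
+ * [35:32] of the physical address, allowing targets above 4GB.
+ */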
+static int
+remap_area_supersections(unsigned long virt, unsigned long pfn,
+                        unsigned long size, unsigned long flags)
+{
+       unsigned long prot, addr = virt, end = virt + size;
+       pgd_t *pgd;
+
+       /*
+        * Remove and free any PTE-based mapping, and
+        * sync the current kernel mapping.
+        */
+       unmap_area_sections(virt, size);
+
+       prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
+                       PMD_DOMAIN(DOMAIN_IO) |
+                       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));
+
+       /*
+        * ARMv6 and above need XN set to prevent speculative prefetches
+        * hitting IO.
+        */
+       if (cpu_architecture() >= CPU_ARCH_ARMv6)
+               prot |= PMD_SECT_XN;
+
+       pgd = pgd_offset_k(virt);
+       do {
+               unsigned long super_pmd_val, i;
+
+               super_pmd_val = __pfn_to_phys(pfn) | prot;
+               super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
+
+               for (i = 0; i < 8; i++) {
+                       pmd_t *pmd = pmd_offset(pgd, addr);
+
+                       pmd[0] = __pmd(super_pmd_val);
+                       pmd[1] = __pmd(super_pmd_val);
+                       flush_pmd_entry(pmd);
+
+                       addr += PGDIR_SIZE;
+                       pgd++;
+               }
+
+               pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
+       } while (addr < end);
+
+       return 0;
+}
+#endif
+
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -133,18 +300,41 @@ void __iomem *
 __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
              unsigned long flags)
 {
+       int err;
        unsigned long addr;
        struct vm_struct * area;
 
+       /*
+        * High mappings (above 4GB, i.e. pfn >= 0x100000 with 4K pages)
+        * must be supersection aligned
+        */
+       if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+               return NULL;
+
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        addr = (unsigned long)area->addr;
-       if (remap_area_pages(addr, pfn, size, flags)) {
+
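+       /*
+        * Use the largest mapping granularity the CPU and the alignment
+        * allow: supersections (16MB) on v6+ (with the extended page
+        * table format) or xsc3, when physical address, size and virtual
+        * address are all 16MB aligned; plain sections (1MB) when they
+        * are 1MB aligned; and individual PTEs otherwise.
+        */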
+#ifndef CONFIG_SMP
+       if ((((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
+              cpu_is_xsc3()) &&
+              !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+               area->flags |= VM_ARM_SECTION_MAPPING;
+               err = remap_area_supersections(addr, pfn, size, flags);
+       } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+               area->flags |= VM_ARM_SECTION_MAPPING;
+               err = remap_area_sections(addr, pfn, size, flags);
+       } else
+#endif
+               err = remap_area_pages(addr, pfn, size, flags);
+
+       if (err) {
                vunmap((void *)addr);
                return NULL;
        }
-       return (void __iomem *) (offset + (char *)addr);
+
+       flush_cache_vmap(addr, addr + size);
+       return (void __iomem *) (offset + addr);
 }
 EXPORT_SYMBOL(__ioremap_pfn);
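+
+/*
+ * Usage sketch (hypothetical device addresses): a driver mapping a
+ * 1MB-aligned, 1MB-sized register window may now transparently get a
+ * section mapping, provided the vmalloc address handed back is also
+ * section aligned:
+ *
+ *     void __iomem *regs = ioremap(0x50000000, SZ_1M);
+ *     ...
+ *     iounmap(regs);
+ */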
 
@@ -173,53 +363,38 @@ EXPORT_SYMBOL(__ioremap);
 
 void __iounmap(void __iomem *addr)
 {
-       vunmap((void *)(PAGE_MASK & (unsigned long)addr));
-}
-EXPORT_SYMBOL(__iounmap);
-
-#ifdef __io
-void __iomem *ioport_map(unsigned long port, unsigned int nr)
-{
-       return __io(port);
-}
-EXPORT_SYMBOL(ioport_map);
-
-void ioport_unmap(void __iomem *addr)
-{
-}
-EXPORT_SYMBOL(ioport_unmap);
+#ifndef CONFIG_SMP
+       struct vm_struct **p, *tmp;
 #endif
+       unsigned int section_mapping = 0;
 
-#ifdef CONFIG_PCI
-#include <linux/pci.h>
-#include <linux/ioport.h>
+       addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);
 
-void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
-{
-       unsigned long start = pci_resource_start(dev, bar);
-       unsigned long len   = pci_resource_len(dev, bar);
-       unsigned long flags = pci_resource_flags(dev, bar);
-
-       if (!len || !start)
-               return NULL;
-       if (maxlen && len > maxlen)
-               len = maxlen;
-       if (flags & IORESOURCE_IO)
-               return ioport_map(start, len);
-       if (flags & IORESOURCE_MEM) {
-               if (flags & IORESOURCE_CACHEABLE)
-                       return ioremap(start, len);
-               return ioremap_nocache(start, len);
+#ifndef CONFIG_SMP
+       /*
+        * If this is a section-based mapping we need to handle it
+        * specially, as the VM subsystem does not know how to handle
+        * such a beast.  We need the lock here because we need to clear
+        * all the mappings before the area can be reclaimed
+        * by someone else.
+        */
+       write_lock(&vmlist_lock);
+       for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
+               if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
+                       if (tmp->flags & VM_ARM_SECTION_MAPPING) {
+                               *p = tmp->next;
+                               unmap_area_sections((unsigned long)tmp->addr,
+                                                   tmp->size);
+                               kfree(tmp);
+                               section_mapping = 1;
+                       }
+                       break;
+               }
        }
-       return NULL;
-}
-EXPORT_SYMBOL(pci_iomap);
+       write_unlock(&vmlist_lock);
+#endif
 
-void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
-{
-       if ((unsigned long)addr >= VMALLOC_START &&
-           (unsigned long)addr < VMALLOC_END)
-               iounmap(addr);
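+       /*
+        * For section mappings the vm_struct has already been removed
+        * and freed above, so only page-based mappings go through
+        * vunmap() here.
+        */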
+       if (!section_mapping)
+               vunmap(addr);
 }
-EXPORT_SYMBOL(pci_iounmap);
-#endif
+EXPORT_SYMBOL(__iounmap);