ARM: integrate CMA with DMA-mapping subsystem
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ecdb9cf..8665fd2 100644
@@ -17,7 +17,9 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
 #include <linux/slab.h>
 #include <linux/iommu.h>
 #include <linux/vmalloc.h>
@@ -29,6 +31,9 @@
 #include <asm/sizes.h>
 #include <asm/mach/arch.h>
 #include <asm/dma-iommu.h>
+#include <asm/mach/map.h>
+#include <asm/system.h>
+#include <asm/dma-contiguous.h>
 
 #include "mm.h"
 
@@ -181,22 +186,6 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
 {
        unsigned long order = get_order(size);
        struct page *page, *p, *e;
-       u64 mask = get_coherent_dma_mask(dev);
-
-#ifdef CONFIG_DMA_API_DEBUG
-       u64 limit = (mask + 1) & ~mask;
-       if (limit && size >= limit) {
-               dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
-                       size, mask);
-               return NULL;
-       }
-#endif
-
-       if (!mask)
-               return NULL;
-
-       if (mask < 0xffffffffULL)
-               gfp |= GFP_DMA;
 
        page = alloc_pages(gfp, order);
        if (!page)
@@ -279,6 +268,9 @@ static int __init consistent_init(void)
        unsigned long base = consistent_base;
        unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
 
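+       /*
+        * On ARMv6+ coherent allocations are served from the atomic
+        * pool or CMA instead, so the consistent region's page tables
+        * are not needed there.
+        */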
+       if (cpu_architecture() >= CPU_ARCH_ARMv6)
+               return 0;
+
        consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
        if (!consistent_pte) {
                pr_err("%s: no memory\n", __func__);
@@ -319,9 +311,101 @@ static int __init consistent_init(void)
 
        return ret;
 }
-
 core_initcall(consistent_init);
 
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+                                    pgprot_t prot, struct page **ret_page);
+
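+/*
+ * The atomic pool: a pre-allocated, pre-mapped region used to satisfy
+ * coherent allocations from atomic context, where the kernel page
+ * tables cannot be remapped.
+ */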
+static struct arm_vmregion_head coherent_head = {
+       .vm_lock        = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
+       .vm_list        = LIST_HEAD_INIT(coherent_head.vm_list),
+};
+
+static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+
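+/*
+ * The pool size can be overridden on the kernel command line,
+ * e.g. "coherent_pool=4M" (memparse() accepts K/M/G suffixes).
+ */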
+static int __init early_coherent_pool(char *p)
+{
+       coherent_pool_size = memparse(p, &p);
+       return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+/*
+ * Initialise the coherent pool for atomic allocations.
+ */
+static int __init coherent_init(void)
+{
+       pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+       size_t size = coherent_pool_size;
+       struct page *page;
+       void *ptr;
+
+       if (cpu_architecture() < CPU_ARCH_ARMv6)
+               return 0;
+
+       ptr = __alloc_from_contiguous(NULL, size, prot, &page);
+       if (ptr) {
+               coherent_head.vm_start = (unsigned long) ptr;
+               coherent_head.vm_end = (unsigned long) ptr + size;
+               printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
+                      (unsigned)size / 1024);
+               return 0;
+       }
+       printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
+              (unsigned)size / 1024);
+       return -ENOMEM;
+}
+/*
+ * CMA is activated by core_initcall(), so coherent_init() must run after it.
+ */
+postcore_initcall(coherent_init);
+
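+/*
+ * CMA areas are reserved before the page tables are set up, so their
+ * base and size are recorded here for dma_contiguous_remap() to remap
+ * once paging is initialised.
+ */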
+struct dma_contig_early_reserve {
+       phys_addr_t base;
+       unsigned long size;
+};
+
+static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
+
+static int dma_mmu_remap_num __initdata;
+
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
+{
+       dma_mmu_remap[dma_mmu_remap_num].base = base;
+       dma_mmu_remap[dma_mmu_remap_num].size = size;
+       dma_mmu_remap_num++;
+}
+
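+/*
+ * Replace the early section mappings covering each CMA area with
+ * page-granular MT_MEMORY_DMA_READY mappings, so the attributes of
+ * individual pages can be changed later by __dma_remap().
+ */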
+void __init dma_contiguous_remap(void)
+{
+       int i;
+       for (i = 0; i < dma_mmu_remap_num; i++) {
+               phys_addr_t start = dma_mmu_remap[i].base;
+               phys_addr_t end = start + dma_mmu_remap[i].size;
+               struct map_desc map;
+               unsigned long addr;
+
+               if (end > arm_lowmem_limit)
+                       end = arm_lowmem_limit;
+               if (start >= end)
+                       continue;
+
+               map.pfn = __phys_to_pfn(start);
+               map.virtual = __phys_to_virt(start);
+               map.length = end - start;
+               map.type = MT_MEMORY_DMA_READY;
+
+               /*
+                * Clear previous low-memory mapping
+                */
+               for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+                    addr += PGDIR_SIZE)
+                       pmd_clear(pmd_off_k(addr));
+
+               iotable_init(&map, 1);
+       }
+}
+
 static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
        const void *caller)
@@ -428,6 +512,122 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
        arm_vmregion_free(&consistent_head, c);
 }
 
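+/*
+ * apply_to_page_range() callback: rewrite one kernel linear-mapping
+ * PTE with the new memory type for the buffer being remapped.
+ */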
+static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+                           void *data)
+{
+       struct page *page = virt_to_page(addr);
+       pgprot_t prot = *(pgprot_t *)data;
+
+       set_pte_ext(pte, mk_pte(page, prot), 0);
+       return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
+{
+       unsigned long start = (unsigned long) page_address(page);
+       unsigned long end = start + size;
+
+       apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
+       dsb();
+       flush_tlb_kernel_range(start, end);
+}
+
+static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
+                                pgprot_t prot, struct page **ret_page,
+                                const void *caller)
+{
+       struct page *page;
+       void *ptr;
+       page = __dma_alloc_buffer(dev, size, gfp);
+       if (!page)
+               return NULL;
+
+       ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
+       if (!ptr) {
+               __dma_free_buffer(page, size);
+               return NULL;
+       }
+
+       *ret_page = page;
+       return ptr;
+}
+
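+/*
+ * Allocate from the pre-mapped atomic pool; this path is safe to use
+ * from atomic context as no page table manipulation is required.
+ */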
+static void *__alloc_from_pool(struct device *dev, size_t size,
+                              struct page **ret_page, const void *caller)
+{
+       struct arm_vmregion *c;
+       size_t align;
+
+       if (!coherent_head.vm_start) {
+               printk(KERN_ERR "%s: coherent pool not initialised!\n",
+                      __func__);
+               dump_stack();
+               return NULL;
+       }
+
+       /*
+        * Align the region allocation: allocations from the pool are
+        * rather small, so align them to their order in pages (one page
+        * minimum). This helps reduce fragmentation of the DMA space.
+        */
+       align = PAGE_SIZE << get_order(size);
+       c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
+       if (c) {
+               void *ptr = (void *)c->vm_start;
+               struct page *page = virt_to_page(ptr);
+               *ret_page = page;
+               return ptr;
+       }
+       return NULL;
+}
+
+static int __free_from_pool(void *cpu_addr, size_t size)
+{
+       unsigned long start = (unsigned long)cpu_addr;
+       unsigned long end = start + size;
+       struct arm_vmregion *c;
+
+       if (start < coherent_head.vm_start || end > coherent_head.vm_end)
+               return 0;
+
+       c = arm_vmregion_find_remove(&coherent_head, start);
+       if (!c)
+               return 0;
+
+       if ((c->vm_end - c->vm_start) != size) {
+               printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
+                      __func__, c->vm_end - c->vm_start, size);
+               dump_stack();
+               size = c->vm_end - c->vm_start;
+       }
+
+       arm_vmregion_free(&coherent_head, c);
+       return 1;
+}
+
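+/*
+ * Allocate a buffer from the CMA area and change its kernel linear
+ * mapping to the requested attributes, so the CPU does not keep a
+ * conflicting cacheable view of DMA memory.
+ */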
+static void *__alloc_from_contiguous(struct device *dev, size_t size,
+                                    pgprot_t prot, struct page **ret_page)
+{
+       unsigned long order = get_order(size);
+       size_t count = size >> PAGE_SHIFT;
+       struct page *page;
+
+       page = dma_alloc_from_contiguous(dev, count, order);
+       if (!page)
+               return NULL;
+
+       __dma_clear_buffer(page, size);
+       __dma_remap(page, size, prot);
+
+       *ret_page = page;
+       return page_address(page);
+}
+
+static void __free_from_contiguous(struct device *dev, struct page *page,
+                                  size_t size)
+{
+       __dma_remap(page, size, pgprot_kernel);
+       dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+}
+
 static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 {
        prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
@@ -436,21 +636,57 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
        return prot;
 }
 
+#define nommu() 0
+
 #else  /* !CONFIG_MMU */
 
-#define __dma_alloc_remap(page, size, gfp, prot, c)    page_address(page)
-#define __dma_free_remap(addr, size)                   do { } while (0)
-#define __get_dma_pgprot(attrs, prot)  __pgprot(0)
+#define nommu() 1
+
+#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)     NULL
+#define __alloc_from_pool(dev, size, ret_page, c)              NULL
+#define __alloc_from_contiguous(dev, size, prot, ret)          NULL
+#define __free_from_pool(cpu_addr, size)                       0
+#define __free_from_contiguous(dev, page, size)                        do { } while (0)
+#define __dma_free_remap(cpu_addr, size)                       do { } while (0)
 
 #endif /* CONFIG_MMU */
 
-static void *
-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
-           pgprot_t prot, const void *caller)
+static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
+                                  struct page **ret_page)
+{
+       struct page *page;
+       page = __dma_alloc_buffer(dev, size, gfp);
+       if (!page)
+               return NULL;
+
+       *ret_page = page;
+       return page_address(page);
+}
+
+static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+                        gfp_t gfp, pgprot_t prot, const void *caller)
 {
+       u64 mask = get_coherent_dma_mask(dev);
        struct page *page;
        void *addr;
 
+#ifdef CONFIG_DMA_API_DEBUG
+       u64 limit = (mask + 1) & ~mask;
+       if (limit && size >= limit) {
+               dev_warn(dev, "coherent allocation too big (requested %#zx mask %#llx)\n",
+                       size, mask);
+               return NULL;
+       }
+#endif
+
+       if (!mask)
+               return NULL;
+
+       if (mask < 0xffffffffULL)
+               gfp |= GFP_DMA;
+
        /*
         * Following is a work-around (a.k.a. hack) to prevent pages
         * with __GFP_COMP being passed to split_page() which cannot
@@ -463,19 +699,17 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
        *handle = DMA_ERROR_CODE;
        size = PAGE_ALIGN(size);
 
-       page = __dma_alloc_buffer(dev, size, gfp);
-       if (!page)
-               return NULL;
-
-       if (!arch_is_coherent())
-               addr = __dma_alloc_remap(page, size, gfp, prot, caller);
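+       /*
+        * Pick an allocator: plain pages when no remapping is needed,
+        * the legacy consistent remap on pre-v6 CPUs, the pre-mapped
+        * atomic pool for atomic requests, and CMA otherwise.
+        */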
+       if (arch_is_coherent() || nommu())
+               addr = __alloc_simple_buffer(dev, size, gfp, &page);
+       else if (cpu_architecture() < CPU_ARCH_ARMv6)
+               addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+       else if (!(gfp & __GFP_WAIT))
+               addr = __alloc_from_pool(dev, size, &page, caller);
        else
-               addr = page_address(page);
+               addr = __alloc_from_contiguous(dev, size, prot, &page);
 
        if (addr)
                *handle = pfn_to_dma(dev, page_to_pfn(page));
-       else
-               __dma_free_buffer(page, size);
 
        return addr;
 }
@@ -506,54 +740,47 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 {
        int ret = -ENXIO;
 #ifdef CONFIG_MMU
-       unsigned long user_size, kern_size;
-       struct arm_vmregion *c;
-
-       vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+       unsigned long pfn = dma_to_pfn(dev, dma_addr);
 
        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
 
-       user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-
-       c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
-       if (c) {
-               unsigned long off = vma->vm_pgoff;
-               struct page *pages = c->priv;
-
-               kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
-
-               if (off < kern_size &&
-                   user_size <= (kern_size - off)) {
-                       ret = remap_pfn_range(vma, vma->vm_start,
-                                             page_to_pfn(pages) + off,
-                                             user_size << PAGE_SHIFT,
-                                             vma->vm_page_prot);
-               }
-       }
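+       /*
+        * Buffers are now always physically contiguous, so mapping the
+        * pfn derived from the DMA handle is sufficient.
+        */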
+       ret = remap_pfn_range(vma, vma->vm_start,
+                             pfn + vma->vm_pgoff,
+                             vma->vm_end - vma->vm_start,
+                             vma->vm_page_prot);
 #endif /* CONFIG_MMU */
 
        return ret;
 }
 
 /*
- * free a page as defined by the above mapping.
- * Must not be called with IRQs disabled.
+ * Free a buffer as defined by the above mapping.
  */
 void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
                  dma_addr_t handle, struct dma_attrs *attrs)
 {
-       WARN_ON(irqs_disabled());
+       struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 
        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;
 
        size = PAGE_ALIGN(size);
 
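+       /* Mirror the allocation path taken in __dma_alloc(). */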
-       if (!arch_is_coherent())
+       if (arch_is_coherent() || nommu()) {
+               __dma_free_buffer(page, size);
+       } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
                __dma_free_remap(cpu_addr, size);
-
-       __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
+               __dma_free_buffer(page, size);
+       } else {
+               if (__free_from_pool(cpu_addr, size))
+                       return;
+               /*
+                * Non-atomic allocations cannot be freed with IRQs disabled
+                */
+               WARN_ON(irqs_disabled());
+               __free_from_contiguous(dev, page, size);
+       }
 }
 
 static void dma_cache_maint_page(struct page *page, unsigned long offset,