/*
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000-2004  Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#define CONSISTENT_BASE		(0xffc00000)
#define CONSISTENT_END		(0xffe00000)
#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
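
/*
 * Worked example (illustrative address only): with 4K pages
 * (PAGE_SHIFT == 12), a mapping at 0xffc42000 yields
 * CONSISTENT_OFFSET(0xffc42000) == (0xffc42000 - 0xffc00000) >> 12
 * == 0x42, i.e. the 0x42nd entry of the consistent page table.
 */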
/*
 * This is the page table (2MB) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte;
static DEFINE_SPINLOCK(consistent_lock);
/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region	region;
 *    unsigned long	flags;
 *    struct page	**pages;
 *    unsigned int	nr_pages;
 *    unsigned long	phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
 *	.vm_start	= VMALLOC_START,
 *	.vm_end		= VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
	struct list_head	vm_list;
	unsigned long		vm_start;
	unsigned long		vm_end;
	struct page		*vm_pages;
	int			vm_active;
};
static struct vm_region consistent_head = {
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};
static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
	unsigned long addr = head->vm_start, end = head->vm_end - size;
	unsigned long flags;
	struct vm_region *c, *new;

	new = kmalloc(sizeof(struct vm_region), gfp);
	if (!new)
		goto out;

	spin_lock_irqsave(&consistent_lock, flags);

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if ((addr + size) < addr)
			goto nospc;
		if ((addr + size) <= c->vm_start)
			goto found;
		addr = c->vm_end;
		if (addr > end)
			goto nospc;
	}

 found:
	/*
	 * Insert this entry _before_ the one we found.
	 */
	list_add_tail(&new->vm_list, &c->vm_list);
	new->vm_start = addr;
	new->vm_end = addr + size;
	new->vm_active = 1;

	spin_unlock_irqrestore(&consistent_lock, flags);
	return new;

 nospc:
	spin_unlock_irqrestore(&consistent_lock, flags);
	kfree(new);
 out:
	return NULL;
}
static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
	struct vm_region *c;

	list_for_each_entry(c, &head->vm_list, vm_list) {
		if (c->vm_active && c->vm_start == addr)
			goto out;
	}
	c = NULL;
 out:
	return c;
}
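
/*
 * Illustrative (hypothetical) pairing of the two helpers above: an
 * address handed out by vm_region_alloc() can later be looked up by
 * its vm_start for as long as the region remains active:
 *
 *	struct vm_region *c = vm_region_alloc(&consistent_head,
 *					      PAGE_SIZE, GFP_KERNEL);
 *	if (c)
 *		BUG_ON(vm_region_find(&consistent_head,
 *				      c->vm_start) != c);
 */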
#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	struct vm_region *c;
	unsigned long order;
	u64 mask = ISA_DMA_THRESHOLD, limit;

	if (!consistent_pte) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}
	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			goto no_page;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			goto no_page;
		}
	}
	/*
	 * Sanity check the allocation size.
	 */
	size = PAGE_ALIGN(size);
	limit = (mask + 1) & ~mask;
	if ((limit && size >= limit) ||
	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
		printk(KERN_WARNING "coherent allocation too big "
		       "(requested %#zx mask %#llx)\n", size, mask);
		goto no_page;
	}
	order = get_order(size);

	if (mask != 0xffffffff)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;
	/*
	 * Invalidate any data that might be lurking in the
	 * kernel direct-mapped region for device DMA.
	 */
	{
		unsigned long kaddr = (unsigned long)page_address(page);
		memset(page_address(page), 0, size);
		dmac_flush_range(kaddr, kaddr + size);
	}
	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = vm_region_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
		struct page *end = page + (1 << order);

		c->vm_pages = page;

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_dma(dev, page);

		do {
			BUG_ON(!pte_none(*pte));

			set_page_count(page, 1);
			/*
			 * x86 does not mark the pages reserved...
			 */
			SetPageReserved(page);
			set_pte(pte, mk_pte(page, prot));
			page++;
			pte++;
		} while (size -= PAGE_SIZE);
		/*
		 * Free the otherwise unused pages.
		 */
		while (page < end) {
			set_page_count(page, 1);
			__free_page(page);
			page++;
		}

		return (void *)c->vm_start;
	}

	if (page)
		__free_pages(page, order);
 no_page:
	*handle = ~0;
	return NULL;
}
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
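
/*
 * Illustrative (hypothetical) driver usage; "mydev" and BUF_SIZE are
 * example names only.  The allocation is paired with dma_free_coherent()
 * below:
 *
 *	dma_addr_t bus;
 *	void *cpu;
 *
 *	cpu = dma_alloc_coherent(&mydev->dev, BUF_SIZE, &bus, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	... give "bus" to the device, access the buffer through "cpu" ...
 *	dma_free_coherent(&mydev->dev, BUF_SIZE, cpu, bus);
 */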
/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	unsigned long flags, user_size, kern_size;
	struct vm_region *c;
	int ret = -ENXIO;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	spin_unlock_irqrestore(&consistent_lock, flags);

	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			vma->vm_flags |= VM_RESERVED;
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}

	return ret;
}
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);
int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
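
/*
 * Illustrative (hypothetical) use from a driver's file_operations
 * ->mmap handler, exposing the coherent buffer from the example above
 * to userspace ("mydev", "cpu", "bus" and BUF_SIZE are example names):
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(&mydev->dev, vma, cpu, bus,
 *					 BUF_SIZE);
 *	}
 */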
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	struct vm_region *c;
	unsigned long flags, addr;
	pte_t *ptep;

	WARN_ON(irqs_disabled());

	size = PAGE_ALIGN(size);

	spin_lock_irqsave(&consistent_lock, flags);
	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
	if (!c)
		goto no_area;

	c->vm_active = 0;
	spin_unlock_irqrestore(&consistent_lock, flags);

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				/*
				 * x86 does not mark the pages reserved...
				 */
				ClearPageReserved(page);

				__free_page(page);
				continue;
			}
		}

		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	spin_lock_irqsave(&consistent_lock, flags);
	list_del(&c->vm_list);
	spin_unlock_irqrestore(&consistent_lock, flags);

	kfree(c);
	return;

 no_area:
	spin_unlock_irqrestore(&consistent_lock, flags);
	printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
	       __func__, cpu_addr);
	dump_stack();
}
EXPORT_SYMBOL(dma_free_coherent);
/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret = 0;

	do {
		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte = pte;
	} while (0);

	return ret;
}

core_initcall(consistent_init);
/*
 * Make an area consistent for devices.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end   = start + size;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		dmac_inv_range(start, end);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		dmac_clean_range(start, end);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		dmac_flush_range(start, end);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(consistent_sync);
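
/*
 * Illustrative (hypothetical) use with a kernel direct-mapped buffer;
 * "buf" and "len" are example names only.  Clean the cache before the
 * device reads the buffer, invalidate before the CPU reads what the
 * device wrote:
 *
 *	consistent_sync(buf, len, DMA_TO_DEVICE);
 *	... device reads from buf ...
 *	consistent_sync(buf, len, DMA_FROM_DEVICE);
 *	... CPU reads what the device wrote ...
 */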