/*
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/tlbflush.h>

#define CONSISTENT_BASE (0xffc00000)
#define CONSISTENT_END  (0xffe00000)
#define CONSISTENT_OFFSET(x)    (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)

/*
 * This is the page table covering the 2MB of uncached, DMA-consistent
 * allocation space (0xffc00000-0xffe00000): 512 4K pages, i.e. exactly
 * one page of ptes.  consistent_lock protects both the ptes and the
 * region list below.
 */
static pte_t *consistent_pte;
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region  region;
 *    unsigned long     flags;
 *    struct page       **pages;
 *    unsigned int      nr_pages;
 *    unsigned long     phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (e.g.):
 *
 *  struct vm_region vmalloc_head = {
 *      .vm_list        = LIST_HEAD_INIT(vmalloc_head.vm_list),
 *      .vm_start       = VMALLOC_START,
 *      .vm_end         = VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent
 * on the amount of RAM found at boot time).  I would imagine that
 * get_vm_area() would have to initialise this each time prior to calling
 * vm_region_alloc().
 */
struct vm_region {
        struct list_head        vm_list;
        unsigned long           vm_start;
        unsigned long           vm_end;
        struct page             *vm_pages;
        int                     vm_active;
};

static struct vm_region consistent_head = {
        .vm_list        = LIST_HEAD_INIT(consistent_head.vm_list),
        .vm_start       = CONSISTENT_BASE,
        .vm_end         = CONSISTENT_END,
};

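/*
 * First-fit allocator for the consistent region: walk the address-ordered
 * region list until a gap large enough for "size" is found, and insert a
 * new region there.  The struct vm_region is kmalloc'd before
 * consistent_lock is taken, so a sleeping gfp is safe here.
 */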
static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
        unsigned long addr = head->vm_start, end = head->vm_end - size;
        unsigned long flags;
        struct vm_region *c, *new;

        new = kmalloc(sizeof(struct vm_region), gfp);
        if (!new)
                goto out;

        spin_lock_irqsave(&consistent_lock, flags);

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if ((addr + size) < addr)
                        goto nospc;
                if ((addr + size) <= c->vm_start)
                        goto found;
                addr = c->vm_end;
                if (addr > end)
                        goto nospc;
        }

 found:
        /*
         * Insert this entry _before_ the one we found.
         */
        list_add_tail(&new->vm_list, &c->vm_list);
        new->vm_start = addr;
        new->vm_end = addr + size;
        new->vm_active = 1;

        spin_unlock_irqrestore(&consistent_lock, flags);
        return new;

 nospc:
        spin_unlock_irqrestore(&consistent_lock, flags);
        kfree(new);
 out:
        return NULL;
}

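/*
 * Look up an active region by its start address.  Called with
 * consistent_lock held.
 */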
static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
        struct vm_region *c;

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if (c->vm_active && c->vm_start == addr)
                        goto out;
        }
        c = NULL;
 out:
        return c;
}

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

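/*
 * Common allocator for the dma_alloc_* variants below: sanity-check the
 * device's coherent DMA mask, grab pages from the page allocator, flush
 * them out of the kernel's cacheable direct mapping, and remap them with
 * the requested pgprot in the consistent region.
 */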
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
            pgprot_t prot)
{
        struct page *page;
        struct vm_region *c;
        unsigned long order;
        u64 mask = ISA_DMA_THRESHOLD, limit;

        if (!consistent_pte) {
                printk(KERN_ERR "%s: not initialised\n", __func__);
                dump_stack();
                return NULL;
        }

        if (dev) {
                mask = dev->coherent_dma_mask;

                /*
                 * Sanity check the DMA mask - it must be non-zero, and
                 * must be able to be satisfied by a DMA allocation.
                 */
                if (mask == 0) {
                        dev_warn(dev, "coherent DMA mask is unset\n");
                        goto no_page;
                }

                if ((~mask) & ISA_DMA_THRESHOLD) {
                        dev_warn(dev, "coherent DMA mask %#llx is smaller "
                                 "than system GFP_DMA mask %#llx\n",
                                 mask, (unsigned long long)ISA_DMA_THRESHOLD);
                        goto no_page;
                }
        }

        /*
         * Sanity check the allocation size.
         */
        size = PAGE_ALIGN(size);
        limit = (mask + 1) & ~mask;
        if ((limit && size >= limit) ||
            size >= (CONSISTENT_END - CONSISTENT_BASE)) {
                printk(KERN_WARNING "coherent allocation too big "
                       "(requested %#zx mask %#llx)\n", size, mask);
                goto no_page;
        }

        order = get_order(size);

        if (mask != 0xffffffff)
                gfp |= GFP_DMA;

        page = alloc_pages(gfp, order);
        if (!page)
                goto no_page;

        /*
         * Invalidate any data that might be lurking in the
         * kernel direct-mapped region for device DMA.
         */
        {
                unsigned long kaddr = (unsigned long)page_address(page);
                memset((void *)kaddr, 0, size);
                dmac_flush_range(kaddr, kaddr + size);
        }

        /*
         * Allocate a virtual address in the consistent mapping region.
         */
        c = vm_region_alloc(&consistent_head, size,
                            gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
        if (c) {
                pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
                struct page *end = page + (1 << order);

                c->vm_pages = page;

                /*
                 * Set the "dma handle"
                 */
                *handle = page_to_dma(dev, page);

                do {
                        BUG_ON(!pte_none(*pte));

                        set_page_count(page, 1);
                        /*
                         * x86 does not mark the pages reserved...
                         */
                        SetPageReserved(page);
                        set_pte(pte, mk_pte(page, prot));
                        page++;
                        pte++;
                } while (size -= PAGE_SIZE);

                /*
                 * Free the otherwise unused pages.
                 */
                while (page < end) {
                        set_page_count(page, 1);
                        __free_page(page);
                        page++;
                }

                return (void *)c->vm_start;
        }

        if (page)
                __free_pages(page, order);
 no_page:
        *handle = ~0;
        return NULL;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
        return __dma_alloc(dev, size, handle, gfp,
                           pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
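
/*
 * Example (a sketch, not part of this file): a driver allocating a small
 * descriptor ring.  "pdev", "ring" and "ring_dma" below are hypothetical
 * names:
 *
 *      dma_addr_t ring_dma;
 *      void *ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
 *                                      &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      (The CPU accesses the ring via "ring"; the device is programmed
 *      with the bus address "ring_dma".)
 *      ...
 *      dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
 */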

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
        return __dma_alloc(dev, size, handle, gfp,
                           pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

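/*
 * Common helper for the dma_mmap_* variants below: find the consistent
 * region backing "cpu_addr" and remap its pages into the user vma,
 * checking that the requested offset and length fit within the original
 * allocation.
 */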
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
                    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        unsigned long flags, user_size, kern_size;
        struct vm_region *c;
        int ret = -ENXIO;

        user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

        spin_lock_irqsave(&consistent_lock, flags);
        c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
        spin_unlock_irqrestore(&consistent_lock, flags);

        if (c) {
                unsigned long off = vma->vm_pgoff;

                kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

                if (off < kern_size &&
                    user_size <= (kern_size - off)) {
                        vma->vm_flags |= VM_RESERVED;
                        ret = remap_pfn_range(vma, vma->vm_start,
                                              page_to_pfn(c->vm_pages) + off,
                                              user_size << PAGE_SHIFT,
                                              vma->vm_page_prot);
                }
        }

        return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
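
/*
 * Example (a sketch; "foo_mmap" and "struct foo" are hypothetical): a
 * driver exposing a write-combined buffer to userspace from its mmap
 * file operation:
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct foo *foo = file->private_data;
 *
 *              return dma_mmap_writecombine(foo->dev, vma, foo->buf,
 *                                           foo->buf_dma, foo->buf_size);
 *      }
 */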

/*
 * Free a buffer allocated by one of the dma_alloc_* mappings above.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
        struct vm_region *c;
        unsigned long flags, addr;
        pte_t *ptep;

        WARN_ON(irqs_disabled());

        size = PAGE_ALIGN(size);

        spin_lock_irqsave(&consistent_lock, flags);
        c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
        if (!c)
                goto no_area;

        c->vm_active = 0;
        spin_unlock_irqrestore(&consistent_lock, flags);

        if ((c->vm_end - c->vm_start) != size) {
                printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
                       __func__, c->vm_end - c->vm_start, size);
                dump_stack();
                size = c->vm_end - c->vm_start;
        }

        ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
        addr = c->vm_start;
        do {
                pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
                unsigned long pfn;

                ptep++;
                addr += PAGE_SIZE;

                if (!pte_none(pte) && pte_present(pte)) {
                        pfn = pte_pfn(pte);

                        if (pfn_valid(pfn)) {
                                struct page *page = pfn_to_page(pfn);

                                /*
                                 * x86 does not mark the pages reserved...
                                 */
                                ClearPageReserved(page);

                                __free_page(page);
                                continue;
                        }
                }

                printk(KERN_CRIT "%s: bad page in kernel page table\n",
                       __func__);
        } while (size -= PAGE_SIZE);

        flush_tlb_kernel_range(c->vm_start, c->vm_end);

        spin_lock_irqsave(&consistent_lock, flags);
        list_del(&c->vm_list);
        spin_unlock_irqrestore(&consistent_lock, flags);

        kfree(c);
        return;

 no_area:
        spin_unlock_irqrestore(&consistent_lock, flags);
        printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
               __func__, cpu_addr);
        dump_stack();
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Initialise the consistent memory allocation: allocate the pmd and pte
 * tables covering CONSISTENT_BASE.  This runs at core_initcall time,
 * before any driver can call the allocator above.
 */
static int __init consistent_init(void)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int ret = 0;

        do {
                pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
                pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
                if (!pmd) {
                        printk(KERN_ERR "%s: no pmd tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
                WARN_ON(!pmd_none(*pmd));

                pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
                if (!pte) {
                        printk(KERN_ERR "%s: no pte tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }

                consistent_pte = pte;
        } while (0);

        return ret;
}

core_initcall(consistent_init);

/*
 * Make an area consistent for devices.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
        unsigned long start = (unsigned long)vaddr;
        unsigned long end   = start + size;

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                dmac_inv_range(start, end);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                dmac_clean_range(start, end);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                dmac_flush_range(start, end);
                break;
        default:
                BUG();
        }
}
EXPORT_SYMBOL(consistent_sync);
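
/*
 * Note: consistent_sync() is intended as the cache maintenance backend
 * for the streaming DMA API.  For example, a dma_map_single(dev, ptr,
 * size, DMA_TO_DEVICE) implementation would be expected to clean the
 * cache over [ptr, ptr + size) via this function before handing the
 * buffer to the device.
 */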