Merge branch 'linux-2.6'
[pandora-kernel.git] / arch/sh/mm/consistent.c
/*
 * arch/sh/mm/consistent.c
 *
 * Copyright (C) 2004 - 2007  Paul Mundt
 *
 * Declared coherent memory functions based on arch/x86/kernel/pci-dma_32.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include <asm/addrspace.h>
#include <asm/io.h>

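/*
 * Per-device coherent memory declared via dma_declare_coherent_memory().
 *
 * virt_base:   kernel virtual address of the ioremapped region
 * device_base: bus address of the region as seen by the device
 * size:        size of the region, in pages
 * flags:       DMA_MEMORY_* flags passed at declaration time
 * bitmap:      one bit per page, tracking which pages are handed out
 */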
struct dma_coherent_mem {
        void            *virt_base;
        u32             device_base;
        int             size;
        int             flags;
        unsigned long   *bitmap;
};

void *dma_alloc_coherent(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                     order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(ret, 0, size);
                        return ret;
                }
                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
                        return NULL;
        }

        ret = (void *)__get_free_pages(gfp, order);

        if (ret != NULL) {
                memset(ret, 0, size);
                /*
                 * Pages from the page allocator may have data present in
                 * cache. So flush the cache before using uncached memory.
                 */
                dma_cache_sync(NULL, ret, size, DMA_BIDIRECTIONAL);
                *dma_handle = virt_to_phys(ret);
        }
        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
        } else {
                WARN_ON(irqs_disabled());       /* for portability */
                BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE);
                free_pages((unsigned long)vaddr, order);
        }
}
EXPORT_SYMBOL(dma_free_coherent);

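/*
 * Hypothetical usage sketch (not part of this file): a driver allocates a
 * small descriptor ring with dma_alloc_coherent(), hands the returned bus
 * address to its hardware, and releases the buffer on teardown. The
 * example_ring structure and the ring size are made-up illustrations; only
 * the dma_alloc_coherent()/dma_free_coherent() calls come from the
 * interface above.
 */
#if 0	/* example only */
struct example_ring {
	void		*vaddr;		/* CPU view of the ring */
	dma_addr_t	dma;		/* device view of the ring */
	size_t		size;
};

static int example_ring_alloc(struct device *dev, struct example_ring *ring)
{
	ring->size  = 16 * 64;		/* 16 descriptors, 64 bytes each (example) */
	ring->vaddr = dma_alloc_coherent(dev, ring->size, &ring->dma,
					 GFP_KERNEL);
	if (!ring->vaddr)
		return -ENOMEM;

	/* program ring->dma into the device's ring base register here */
	return 0;
}

static void example_ring_free(struct device *dev, struct example_ring *ring)
{
	dma_free_coherent(dev, ring->size, ring->vaddr, ring->dma);
}
#endif
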
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap_nocache(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        kfree(dev->dma_mem);
 out:
        if (mem_base)
                iounmap(mem_base);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

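/*
 * Hypothetical usage sketch (not part of this file): a driver declares a
 * window of on-chip SRAM as its device's coherent pool so later
 * dma_alloc_coherent() calls for that device are satisfied from it, and
 * releases the declaration on remove. The bus address, device address and
 * size below are made-up examples.
 */
#if 0	/* example only */
static int example_probe(struct device *dev)
{
	int ret;

	ret = dma_declare_coherent_memory(dev,
					  0xfd000000,	/* bus address (example) */
					  0xfd000000,	/* device address (example) */
					  0x10000,	/* 64 KiB (example) */
					  DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE);
	if (!(ret & DMA_MEMORY_MAP))
		return -ENODEV;

	return 0;
}

static void example_remove(struct device *dev)
{
	dma_release_declared_memory(dev);
}
#endif
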
void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int pos, err;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

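/*
 * Hypothetical usage sketch (not part of this file): after declaring a
 * coherent region, a driver pins down a fixed slot inside it (for instance
 * a buffer the hardware expects at a specific device address) so that
 * ordinary dma_alloc_coherent() calls will not hand that range out. The
 * device address and size are made-up examples; the return value must be
 * checked with IS_ERR() since the routine above returns ERR_PTR() codes.
 */
#if 0	/* example only */
static void *example_reserve_fixed_buffer(struct device *dev)
{
	/* 0xfd008000 / 0x1000: made-up device address and size */
	return dma_mark_declared_memory_occupied(dev, 0xfd008000, 0x1000);
}
#endif
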
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                    enum dma_data_direction direction)
{
#ifdef CONFIG_CPU_SH5
        void *p1addr = vaddr;
#else
        void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
#endif

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                __flush_invalidate_region(p1addr, size);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                __flush_wback_region(p1addr, size);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                __flush_purge_region(p1addr, size);
                break;
        default:
                BUG();
        }
}
EXPORT_SYMBOL(dma_cache_sync);
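/*
 * Hypothetical usage sketch (not part of this file): explicit cache
 * maintenance around a buffer shared with a device. The buffer and length
 * are made-up examples; the direction arguments match the cases handled
 * above (writeback before the device reads, invalidate before the CPU
 * reads what the device wrote).
 */
#if 0	/* example only */
static void example_send_and_receive(struct device *dev, void *buf, size_t len)
{
	/* CPU filled buf; write dirty lines back before the device reads it */
	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);

	/* ... start the transfer and wait for the device to respond ... */

	/* device wrote into buf; invalidate stale lines before the CPU reads */
	dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
}
#endif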