arch/mips/mm/dma-default.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

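/*
 * Translate a DMA (bus) address back to the kernel virtual address of
 * the underlying buffer, using the platform's dma->phys conversion hook.
 */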
static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

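/*
 * The R10000 and R12000 execute loads speculatively, so cache lines can
 * be refilled while a device still owns the buffer; these CPUs therefore
 * need cache maintenance even on the unmap/for_cpu side of a transfer.
 */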
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
	       current_cpu_type() == CPU_R12000);
}

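/*
 * Pick an allocation zone for the buffer: drop whatever zone specifiers
 * the caller passed in and choose ZONE_DMA/ZONE_DMA32 based on the
 * device's coherent DMA mask.
 */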
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}

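/*
 * Allocate zeroed pages and return their normal, cached kernel address;
 * callers are expected to manage coherency themselves, e.g. with
 * dma_cache_sync().
 */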
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

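/*
 * Same as dma_alloc_noncoherent() except that, on noncoherent platforms,
 * the buffer is flushed from the caches and an uncached alias of it
 * (via UNCAC_ADDR) is handed back to the caller.
 */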
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle);
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

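/*
 * dma_alloc_coherent() may have returned an uncached alias, so convert
 * back to the cached address before handing the pages to free_pages(),
 * which expects the address __get_free_pages() originally returned.
 */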
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	plat_unmap_dma_mem(dev, dma_handle);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

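/*
 * Perform the cache maintenance implied by the transfer direction:
 * writeback before the device reads (DMA_TO_DEVICE), invalidate before
 * the CPU reads what the device wrote (DMA_FROM_DEVICE), or both.
 */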
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

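/*
 * Map a single buffer for streaming DMA.  A minimal usage sketch, with
 * hypothetical driver names:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	mydev_start_dma(mydev, handle, len);	(device-specific)
 *	... wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */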
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

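/*
 * On most noncoherent CPUs the unmap side needs no cache work, but the
 * speculative R10000/R12000 may have refilled lines behind the device's
 * back, so sync once more before ownership returns to the CPU.
 */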
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dev, dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

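/*
 * Map each scatterlist entry for streaming DMA.  Returns the number of
 * entries mapped; this implementation never merges or rejects entries.
 */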
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
						   (void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

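/*
 * Page-based variant of dma_map_single(); the cache sync uses the page's
 * kernel mapping, so this relies on page_address() being valid.
 */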
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		__dma_sync(addr, size, direction);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

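/*
 * Tear down a scatterlist mapping.  Data headed to the device needs no
 * further cache work here; for the other directions, sync so the CPU
 * doesn't read stale cache lines instead of the device's data.
 */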
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(dev, sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

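/*
 * The dma_sync_*_for_cpu()/_for_device() pairs below transfer buffer
 * ownership between CPU and device without unmapping.  The for_cpu
 * variants only need cache work on the speculative R10000/R12000; the
 * for_device variants must sync on every noncoherent platform.
 */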
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

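/*
 * Scatterlist variants of the ownership-transfer helpers above.
 */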
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

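/*
 * The remaining queries simply defer to the platform's dma-coherence.h
 * hooks.
 */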
int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

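/*
 * Flush a buffer obtained from dma_alloc_noncoherent() before or after
 * the device touches it, as appropriate for the transfer direction.
 */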
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);