arch/x86/include/asm/dma-mapping.h
#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

/*
 * Use the device's archdata.dma_ops if present (64-bit only), otherwise
 * fall back to the global dma_ops.
 */
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}

/*
 * Make sure we keep the same behaviour: if the ops do not provide a
 * ->mapping_error hook, a returned address equal to bad_dma_address
 * means the mapping failed.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));
        kmemcheck_mark_initialized(ptr, size);
        addr = ops->map_page(hwdev, virt_to_page(ptr),
                             (unsigned long)ptr & ~PAGE_MASK, size,
                             dir, NULL);
        debug_dma_map_page(hwdev, virt_to_page(ptr),
                           (unsigned long)ptr & ~PAGE_MASK, size,
                           dir, addr, true);
        return addr;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
        debug_dma_unmap_page(dev, addr, size, dir, true);
}
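
/*
 * Illustrative use of the single-buffer streaming API above (a sketch, not
 * part of this header; 'dev', 'buf' and 'len' are hypothetical driver-side
 * values):
 *
 *      dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, bus))
 *              return -ENOMEM;
 *      ... start the device transfer using 'bus' ...
 *      dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */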

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);
        int ents;
        struct scatterlist *s;
        int i;

        BUG_ON(!valid_dma_direction(dir));
        for_each_sg(sg, s, nents, i)
                kmemcheck_mark_initialized(sg_virt(s), s->length);
        ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
        debug_dma_map_sg(hwdev, sg, nents, ents, dir);

        return ents;
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(hwdev, sg, nents, dir);
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, dir, NULL);
}
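
/*
 * Illustrative scatter-gather usage (a sketch, not part of this header;
 * 'dev', 'sglist' and 'nents' are hypothetical driver-side values):
 *
 *      int count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *      if (!count)
 *              return -ENOMEM;
 *      ... program the device using sg_dma_address()/sg_dma_len() on the
 *          first 'count' entries (may be fewer than 'nents' if the IOMMU
 *          merged entries) ...
 *      dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original 'nents', not the count
 * returned by dma_map_sg().
 */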

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
        debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size, dir);
        debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
        flush_write_buffers();
}
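
/*
 * Illustrative sync pattern for a long-lived streaming mapping (a sketch,
 * not part of this header; 'dev', 'bus' and 'len' are hypothetical):
 *
 *      dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *      ... the CPU may now safely read the buffer ...
 *      dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *      ... ownership returns to the device for the next transfer ...
 */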

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, dir);
        debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
                                            offset, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, dir);
        debug_dma_sync_single_range_for_device(hwdev, dma_handle,
                                               offset, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
        flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));
        kmemcheck_mark_initialized(page_address(page) + offset, size);
        addr = ops->map_page(dev, page, offset, size, dir, NULL);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
        debug_dma_unmap_page(dev, addr, size, dir, false);
}
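
/*
 * Illustrative page-based mapping (a sketch, not part of this header;
 * 'dev', 'page', 'offset' and 'len' are hypothetical):
 *
 *      dma_addr_t bus = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, bus))
 *              return -ENOMEM;
 *      ...
 *      dma_unmap_page(dev, bus, len, DMA_TO_DEVICE);
 */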

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /*
         * There is no easy way to get the cache line size on all x86 CPUs,
         * so return the maximum possible value to be safe.
         */
        return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
{
        unsigned long dma_mask = dev->coherent_dma_mask;

        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

        return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
        unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

        if (dma_mask <= DMA_BIT_MASK(24))
                gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
        if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;
#endif
        return gfp;
}
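
/*
 * Worked example for the two helpers above (assuming CONFIG_X86_64): a
 * device with a 24-bit coherent_dma_mask gets GFP_DMA added, a 32-bit mask
 * gets GFP_DMA32, and a full 64-bit mask leaves the zone flags untouched.
 * A device with no coherent_dma_mask set is treated as 32-bit capable
 * (24-bit if the caller already passed GFP_DMA).
 */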

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *memory;

        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &x86_dma_fallback_dev;
                gfp |= GFP_DMA;
        }

        if (!is_device_dma_capable(dev))
                return NULL;

        if (!ops->alloc_coherent)
                return NULL;

        memory = ops->alloc_coherent(dev, size, dma_handle,
                                     dma_alloc_coherent_gfp_flags(dev, gfp));
        debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

        return memory;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        debug_dma_free_coherent(dev, size, vaddr, bus);
        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
}
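
/*
 * Illustrative coherent allocation (a sketch, not part of this header;
 * 'dev' and 'size' are hypothetical driver-side values):
 *
 *      dma_addr_t bus;
 *      void *cpu = dma_alloc_coherent(dev, size, &bus, GFP_KERNEL);
 *      if (!cpu)
 *              return -ENOMEM;
 *      ... share the buffer with the device via 'bus' ...
 *      dma_free_coherent(dev, size, cpu, bus);
 *
 * dma_free_coherent() may not be called with interrupts disabled.
 */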

#endif /* _ASM_X86_DMA_MAPPING_H */