#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
        return pfn_to_page(__bus_to_pfn(addr));
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return __arch_page_to_dma(dev, page);
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_page(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif
/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change.  Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
        enum dma_data_direction dir)
{
        extern void ___dma_single_cpu_to_dev(const void *, size_t,
                enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
        enum dma_data_direction dir)
{
        extern void ___dma_single_dev_to_cpu(const void *, size_t,
                enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
{
        extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
                size_t, enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
        size_t size, enum dma_data_direction dir)
{
        extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
                size_t, enum dma_data_direction);

        if (!arch_is_coherent())
                ___dma_page_dev_to_cpu(page, off, size, dir);
}
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
        if (mask < ISA_DMA_THRESHOLD)
                return 0;
        return 1;
}
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_DMABOUNCE
        if (dev->archdata.dmabounce) {
                if (dma_mask >= ISA_DMA_THRESHOLD)
                        return 0;
                else
                        return -EIO;
        }
#endif

        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}
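/*
 * Example (illustrative sketch only, not part of this header): a
 * hypothetical driver probe routine negotiating a 32-bit DMA mask before
 * creating any mappings.  "foo_probe" and "pdev" are made-up names.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *                      return -EIO;
 *              return 0;
 *      }
 */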
/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == ~0;
}
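/*
 * Example (sketch only): checking a streaming mapping for failure before
 * handing the address to the hardware.  "dev", "buf" and "len" are
 * hypothetical.
 *
 *      dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 */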
/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp)
{
        return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle)
{
}
/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
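/*
 * Example (sketch only): allocating a page-sized coherent descriptor ring.
 * "ring" and "ring_dma" are hypothetical driver variables; ring_dma is the
 * address programmed into the device, ring is what the CPU uses.
 *
 *      void *ring;
 *      dma_addr_t ring_dma;
 *
 *      ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 */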
/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
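/*
 * Example (sketch only): exporting a coherent buffer to user space from a
 * hypothetical driver's mmap file operation.  "priv", "cpu_addr" and
 * "handle" are assumed to come from an earlier dma_alloc_coherent() call.
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct foo_priv *priv = file->private_data;
 *
 *              return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *                                       priv->handle,
 *                                       vma->vm_end - vma->vm_start);
 *      }
 */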
/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
                gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
        dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);
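/*
 * Example (sketch only): a framebuffer-style allocation where CPU writes
 * may be buffered and combined but the memory stays uncached.  "fb_virt",
 * "fb_dma" and "fb_size" are hypothetical.
 *
 *      fb_virt = dma_alloc_writecombine(dev, fb_size, &fb_dma, GFP_KERNEL);
 *      if (!fb_virt)
 *              return -ENOMEM;
 *      (and later, from the frame buffer's mmap hook:)
 *      ret = dma_mmap_writecombine(dev, vma, fb_virt, fb_dma, fb_size);
 */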
#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 */
/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
                unsigned long);
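/*
 * Example (sketch only): platform code registering a device whose DMA
 * window does not cover all of RAM, requesting 512-byte and 4096-byte
 * bounce buffer pools.  "pdev" is hypothetical.
 *
 *      ret = dmabounce_register_dev(&pdev->dev, 512, 4096);
 *      if (ret)
 *              dev_err(&pdev->dev, "dmabounce registration failed\n");
 */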
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the controlled bus.
 */
extern void dmabounce_unregister_dev(struct device *);
/*
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not.  The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
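/*
 * Example (sketch only): a platform implementation that bounces any buffer
 * falling outside a 64MB inbound window.  "PLATFORM_DMA_BASE" is a made-up
 * platform constant.
 *
 *      int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *      {
 *              return (addr < PLATFORM_DMA_BASE) ||
 *                     (addr + size > PLATFORM_DMA_BASE + SZ_64M);
 *      }
 */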
/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
                enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
                unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}
/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        __dma_single_cpu_to_dev(cpu_addr, size, dir);

        return virt_to_dma(dev, cpu_addr);
}
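/*
 * Example (sketch only): a transmit path handing a buffer to the device and
 * reclaiming it on completion.  "foo_hw_start_tx" is a hypothetical helper
 * that programs the DMA address into the hardware.
 *
 *      dma = dma_map_single(dev, tx_buf, tx_len, DMA_TO_DEVICE);
 *      foo_hw_start_tx(dev, dma, tx_len);
 *      (the device owns tx_buf until the completion interrupt, then:)
 *      dma_unmap_single(dev, dma, tx_len, DMA_TO_DEVICE);
 */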
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        __dma_page_cpu_to_dev(page, offset, size, dir);

        return page_to_dma(dev, page) + offset;
}
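/*
 * Example (sketch only): mapping part of a page, e.g. one fragment of a
 * larger buffer.  "frag_page", "frag_off" and "frag_len" are hypothetical.
 *
 *      dma = dma_map_page(dev, frag_page, frag_off, frag_len, DMA_TO_DEVICE);
 *      (when the device has finished with it:)
 *      dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
 */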
/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}
/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        __dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
                size, dir);
}
#endif /* CONFIG_DMABOUNCE */
/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the PCI dma
 * mapping, you must call this function before doing so.  At the
 * next point you give the PCI dma address back to the card, you
 * must first perform a dma_sync_for_device, and then the
 * device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
                return;

        __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
                return;

        __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}
static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
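/*
 * Example (sketch only): peeking at a mapped receive buffer on the CPU and
 * then returning it to the device without unmapping it.  "rx_dma", "rx_buf"
 * and "rx_len" are hypothetical, as is foo_parse_header().
 *
 *      dma_sync_single_for_cpu(dev, rx_dma, rx_len, DMA_FROM_DEVICE);
 *      foo_parse_header(rx_buf);
 *      dma_sync_single_for_device(dev, rx_dma, rx_len, DMA_FROM_DEVICE);
 *      (the device owns the buffer again and may be given rx_dma once more)
 */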
/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
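/*
 * Example (sketch only): mapping a two-entry scatterlist.  dma_map_sg() may
 * return fewer entries than it was given; use the returned count when
 * programming the transfer, but pass the original count to dma_unmap_sg().
 * "buf0"/"buf1" and their lengths are hypothetical.
 *
 *      struct scatterlist sg[2];
 *      int nents;
 *
 *      sg_init_table(sg, 2);
 *      sg_set_buf(&sg[0], buf0, len0);
 *      sg_set_buf(&sg[1], buf1, len1);
 *      nents = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
 *      if (nents == 0)
 *              return -ENOMEM;
 *      (program the nents mapped segments into the device, then later:)
 *      dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
 */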
#endif /* __KERNEL__ */
#endif