#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif
/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif
/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device. These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches. We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change. Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}
/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	if (mask < ISA_DMA_THRESHOLD)
		return 0;
	return 1;
}
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_DMABOUNCE
	if (dev->archdata.dmabounce) {
		if (dma_mask >= ISA_DMA_THRESHOLD)
			return 0;
		else
			return -EIO;
	}
#endif

	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
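/*
 * Illustrative sketch (not part of this header): a driver would typically
 * negotiate its DMA mask at probe time, before mapping any buffers. The
 * probe function name and the 32-bit mask below are assumptions made for
 * the example only.
 *
 *	static int mydev_probe(struct platform_device *pdev)
 *	{
 *		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 *
 *		... continue with device setup ...
 *	}
 */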
/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}
/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal both while this call is executing and after it returns.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
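/*
 * Illustrative sketch (not part of this header): allocating and freeing a
 * small coherent buffer. The device pointer and the one-page size are
 * assumptions made for the example only.
 *
 *	void *cpu_addr;
 *	dma_addr_t dma_handle;
 *
 *	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *
 *	... program the device with dma_handle, access the buffer via cpu_addr ...
 *
 *	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
 */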
/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA. This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
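/*
 * Illustrative sketch (not part of this header): exporting a writecombining
 * DMA buffer to user space from a driver's mmap() handler. The buffer is
 * assumed to have been allocated earlier with dma_alloc_writecombine() and
 * kept in a hypothetical driver-private structure.
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydev *md = file->private_data;
 *
 *		return dma_mmap_writecombine(md->dev, vma, md->cpu_addr,
 *					     md->dma_handle, md->buf_size);
 *	}
 */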
#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */
/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);
/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not. The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
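/*
 * Illustrative sketch (not part of this header): a hypothetical platform
 * whose inbound window covers only the first 64MB of RAM might implement
 * dma_needs_bounce() roughly as below. The 64MB window and its placement at
 * the base of RAM are assumptions made for the example only.
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > (PHYS_OFFSET + SZ_64M);
 *	}
 *
 * returning 0 when the buffer is directly addressable by the device and 1
 * when it must be bounced.
 */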
/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);
/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}
/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	__dma_single_cpu_to_dev(cpu_addr, size, dir);

	return virt_to_dma(dev, cpu_addr);
}
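/*
 * Illustrative sketch (not part of this header): a typical streaming-DMA
 * transmit path showing the ownership hand-over. The buffer pointer and
 * length are assumptions for the example; the error check uses
 * dma_mapping_error() as defined above.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *
 *	... hand "dma" to the device, which owns the buffer until unmapped ...
 *
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *	... the CPU owns the buffer again ...
 */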
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed. The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	__dma_page_cpu_to_dev(page, offset, size, dir);

	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}
/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation. The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */
/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA mapping,
 * you must call this function before doing so. At the next point you
 * give the DMA address back to the card, you must first perform a
 * dma_sync_single_for_device(), and then the device again owns the
 * buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
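/*
 * Illustrative sketch (not part of this header): inspecting a mapped buffer
 * between transfers without unmapping it. The ring buffer fields and the
 * process_rx() helper are hypothetical names used only for the example.
 *
 *	dma_sync_single_for_cpu(dev, ring_dma, ring_len, DMA_FROM_DEVICE);
 *	... the CPU may now look at what the device wrote ...
 *	process_rx(ring_buf, ring_len);
 *
 *	dma_sync_single_for_device(dev, ring_dma, ring_len, DMA_FROM_DEVICE);
 *	... the device owns the buffer again for the next transfer ...
 */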
/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
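/*
 * Illustrative sketch (not part of this header): mapping a scatterlist for
 * a transfer to the device. The entry count, the buffer-filling steps and
 * the program_descriptor() helper are assumptions made for the example.
 *
 *	struct scatterlist sg[NENTS];
 *	int i, count;
 *
 *	sg_init_table(sg, NENTS);
 *	... fill the entries with sg_set_buf() or sg_set_page() ...
 *
 *	count = dma_map_sg(dev, sg, NENTS, DMA_TO_DEVICE);
 *	for (i = 0; i < count; i++)
 *		program_descriptor(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
 *
 *	... after the transfer completes, unmap with the original entry count ...
 *	dma_unmap_sg(dev, sg, NENTS, DMA_TO_DEVICE);
 */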
#endif /* __KERNEL__ */
#endif