include/asm-x86_64/dma-mapping.h (pandora-kernel.git)
#ifndef _X8664_DMA_MAPPING_H
#define _X8664_DMA_MAPPING_H 1

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

struct dma_mapping_ops {
        int             (*mapping_error)(dma_addr_t dma_addr);
        void *          (*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void            (*free_coherent)(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
        dma_addr_t      (*map_single)(struct device *hwdev, void *ptr,
                                size_t size, int direction);
        /* like map_single, but doesn't check the device mask */
        dma_addr_t      (*map_simple)(struct device *hwdev, char *ptr,
                                size_t size, int direction);
        void            (*unmap_single)(struct device *dev, dma_addr_t addr,
                                size_t size, int direction);
        void            (*sync_single_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_range_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_single_range_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        void            (*sync_sg_for_device)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                                int nents, int direction);
        void            (*unmap_sg)(struct device *hwdev,
                                struct scatterlist *sg, int nents,
                                int direction);
        int             (*dma_supported)(struct device *hwdev, u64 mask);
        int             is_phys;
};
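
/*
 * Each low-level backend (GART IOMMU, swiotlb, nommu) fills in one of
 * these tables and points dma_ops at it. A minimal sketch of a
 * direct-mapped, nommu-style backend; the names below are illustrative,
 * not the actual in-tree implementations:
 *
 *      static dma_addr_t example_map_single(struct device *hwdev, void *ptr,
 *                                           size_t size, int direction)
 *      {
 *              return virt_to_bus(ptr);        // identity mapping, no IOMMU
 *      }
 *
 *      static struct dma_mapping_ops example_dma_ops = {
 *              .map_single     = example_map_single,
 *              .is_phys        = 1,            // bus address == physical
 *      };
 */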

extern dma_addr_t bad_dma_address;
extern struct dma_mapping_ops *dma_ops;
extern int iommu_merge;

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        if (dma_ops->mapping_error)
                return dma_ops->mapping_error(dma_addr);

        return (dma_addr == bad_dma_address);
}
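
/*
 * Streaming mappings can fail (e.g. IOMMU space exhausted), so callers
 * should check the returned address before handing it to hardware.
 * A minimal sketch of the idiom (dev, buf and len are illustrative):
 *
 *      dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(bus))
 *              return -ENOMEM;
 */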

extern void *dma_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                              dma_addr_t dma_handle);
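
/*
 * Coherent memory stays mapped for the lifetime of the buffer and needs
 * no dma_sync_* calls; a descriptor ring is the typical use. A usage
 * sketch (pdev and the ring size are illustrative):
 *
 *      dma_addr_t ring_bus;
 *      void *ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
 *                                      &ring_bus, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_bus);
 */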

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        return dma_ops->map_single(hwdev, ptr, size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        dma_ops->unmap_single(dev, addr, size, direction);
}

#define dma_map_page(dev,page,offset,size,dir) \
        dma_map_single((dev), page_address(page)+(offset), (size), (dir))

#define dma_unmap_page dma_unmap_single
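
/*
 * dma_map_page() can go through page_address() because x86-64 has no
 * highmem: every page is permanently visible in the kernel's direct
 * mapping. Typical streaming lifetime (sketch; buf/len illustrative):
 *
 *      dma_addr_t bus = dma_map_single(&pdev->dev, buf, len,
 *                                      DMA_TO_DEVICE);
 *      ...device DMAs from the buffer...
 *      dma_unmap_single(&pdev->dev, bus, len, DMA_TO_DEVICE);
 */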

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        if (dma_ops->sync_single_for_cpu)
                dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
                                             direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        if (dma_ops->sync_single_for_device)
                dma_ops->sync_single_for_device(hwdev, dma_handle, size,
                                                direction);
        flush_write_buffers();
}
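
/*
 * A buffer that stays mapped across several transfers must bounce
 * ownership between device and CPU with the sync calls, roughly:
 *
 *      dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *      ...CPU may now read the freshly DMA'd data...
 *      dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *      ...device may DMA into the buffer again...
 */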

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        if (dma_ops->sync_single_range_for_cpu)
                dma_ops->sync_single_range_for_cpu(hwdev, dma_handle,
                                                   offset, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        if (dma_ops->sync_single_range_for_device)
                dma_ops->sync_single_range_for_device(hwdev, dma_handle,
                                                      offset, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        if (dma_ops->sync_sg_for_cpu)
                dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        if (dma_ops->sync_sg_for_device)
                dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
           int direction)
{
        return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        dma_ops->unmap_sg(hwdev, sg, nents, direction);
}
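
/*
 * An IOMMU may merge adjacent entries (see iommu_merge above), so
 * dma_map_sg() can return fewer entries than it was given; drivers must
 * loop over the returned count, not nents. Sketch with an illustrative
 * program_hw_slot() helper:
 *
 *      int i, count = dma_map_sg(&pdev->dev, sg, nents, DMA_TO_DEVICE);
 *      for (i = 0; i < count; i++)
 *              program_hw_slot(i, sg_dma_address(&sg[i]),
 *                              sg_dma_len(&sg[i]));
 *      ...
 *      dma_unmap_sg(&pdev->dev, sg, nents, DMA_TO_DEVICE);
 */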

extern int dma_supported(struct device *hwdev, u64 mask);

/* same for gart, swiotlb, and nommu */
static inline int dma_get_cache_alignment(void)
{
        return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(h) 1

extern int dma_set_mask(struct device *dev, u64 mask);
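
/*
 * A driver negotiates its addressing capability at probe time; a return
 * of 0 means the mask was accepted. Sketch:
 *
 *      if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *              return -EIO;            // device unusable with this mask
 */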

static inline void
dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
{
        flush_write_buffers();
}

extern struct device fallback_dev;
extern int panic_on_overflow;

#endif /* _X8664_DMA_MAPPING_H */