/* include/asm-generic/dma-mapping-common.h (pandora-kernel.git) */
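/*
 * Common streaming-DMA helpers implemented on top of struct dma_map_ops,
 * shared by architectures whose DMA API dispatches through get_dma_ops().
 */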
#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>

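/*
 * Map a single, physically contiguous kernel buffer for streaming DMA.
 * The buffer is handed to the device's ->map_page op; the returned
 * handle must later be released with dma_unmap_single_attrs().
 */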
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                                              size_t size,
                                              enum dma_data_direction dir,
                                              struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(ptr, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, virt_to_page(ptr),
                             (unsigned long)ptr & ~PAGE_MASK, size,
                             dir, attrs);
        debug_dma_map_page(dev, virt_to_page(ptr),
                           (unsigned long)ptr & ~PAGE_MASK, size,
                           dir, addr, true);
        return addr;
}

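/*
 * Unmap a buffer previously mapped with dma_map_single_attrs(), using
 * the same size, direction and attributes.
 */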
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                                          size_t size,
                                          enum dma_data_direction dir,
                                          struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir, true);
}

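/*
 * Map a scatter/gather list for streaming DMA.  Returns the number of
 * DMA address/length pairs to use, which may be fewer than nents if
 * the implementation merged adjacent entries.
 */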
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                   int nents, enum dma_data_direction dir,
                                   struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        int i, ents;
        struct scatterlist *s;

        for_each_sg(sg, s, nents, i)
                kmemcheck_mark_initialized(sg_virt(s), s->length);
        BUG_ON(!valid_dma_direction(dir));
        ents = ops->map_sg(dev, sg, nents, dir, attrs);
        debug_dma_map_sg(dev, sg, nents, ents, dir);

        return ents;
}

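/*
 * Unmap a scatterlist mapped with dma_map_sg_attrs().  nents must be
 * the value originally passed to the map call, not the value it
 * returned.
 */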
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                                      int nents, enum dma_data_direction dir,
                                      struct dma_attrs *attrs)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(dev, sg, nents, dir);
        if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
}

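/* Map offset/size within a page for streaming DMA; no attributes are passed. */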
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(page_address(page) + offset, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, page, offset, size, dir, NULL);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}

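/* Unmap a page mapped with dma_map_page(). */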
static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
        debug_dma_unmap_page(dev, addr, size, dir, false);
}

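/*
 * Transfer ownership of a streaming mapping back to the CPU so that it
 * can safely access data the device has written.
 */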
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                           size_t size,
                                           enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
        flush_write_buffers();
}

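/*
 * Hand a streaming mapping back to the device after the CPU has
 * finished touching the buffer.
 */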
static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t addr, size_t size,
                                              enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
        flush_write_buffers();
}

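/*
 * Sync only a sub-range of a mapping for CPU access.  Falls back to
 * dma_sync_single_for_cpu() on the offset sub-range when the
 * architecture does not provide a range-specific op.
 */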
static inline void dma_sync_single_range_for_cpu(struct device *dev,
                                                 dma_addr_t addr,
                                                 unsigned long offset,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_cpu) {
                ops->sync_single_range_for_cpu(dev, addr, offset, size, dir);
                debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);

                flush_write_buffers();
        } else
                dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

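/*
 * Device-direction counterpart of dma_sync_single_range_for_cpu().
 */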
static inline void dma_sync_single_range_for_device(struct device *dev,
                                                    dma_addr_t addr,
                                                    unsigned long offset,
                                                    size_t size,
                                                    enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_device) {
                ops->sync_single_range_for_device(dev, addr, offset, size, dir);
                debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);

                flush_write_buffers();
        } else
                dma_sync_single_for_device(dev, addr + offset, size, dir);
}

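/* Sync every element of a mapped scatterlist for CPU access. */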
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
        flush_write_buffers();
}

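/* Sync every element of a mapped scatterlist before device access. */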
static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);

        flush_write_buffers();
}

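/*
 * The plain interfaces are the *_attrs variants with a NULL attribute
 * pointer.
 */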
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

#endif