powerpc: Tidy up dma_map_ops after adding new hook
arch/powerpc/kernel/dma.c (pandora-kernel.git)
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped buses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
#include <asm/machdep.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

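/*
 * Illustrative sketch, not part of the original file: one way platform
 * code might apply a per-device offset and attach the direct ops, using
 * the set_dma_offset()/set_dma_ops() helpers from <asm/dma-mapping.h>.
 * The function name and the 0x80000000ul offset are made-up examples.
 */
#if 0	/* example only */
static void example_setup_dma(struct device *dev)
{
        /* This bus sees system RAM starting at bus address 0x80000000 */
        set_dma_offset(dev, 0x80000000ul);
        set_dma_ops(dev, &dma_direct_ops);
}
#endif
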
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag)
{
        void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
        ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
        if (ret == NULL)
                return NULL;
        *dma_handle += get_dma_offset(dev);
        return ret;
#else
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_abs(ret) + get_dma_offset(dev);

        return ret;
#endif
}
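
/*
 * Hedged usage sketch, not from this file: drivers reach the allocator
 * above through the generic dma_alloc_coherent() wrapper. The variable
 * names and PAGE_SIZE length are illustrative.
 */
#if 0	/* example only */
        dma_addr_t bus_addr;
        void *ring;

        ring = dma_alloc_coherent(dev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
        if (!ring)
                return -ENOMEM;
        /* ... hand bus_addr to the device, use ring from the CPU ... */
        dma_free_coherent(dev, PAGE_SIZE, ring, bus_addr);
#endif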

void dma_direct_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                sg->dma_length = sg->length;
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }

        return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
        /* Nothing to do: a direct mapping needs no teardown */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
        /* Could be improved so platforms can set the limit in case
         * they have limited DMA windows
         */
        return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
        return 1;
#endif
}
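
/*
 * Illustrative arithmetic for the PPC64 check above, not from the
 * original file: with 8GB of RAM and a zero offset,
 * memblock_end_of_DRAM() - 1 is 0x1ffffffff, so a 32-bit mask
 * (0xffffffff) is rejected, while a 64-bit capable device passes.
 */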

static u64 dma_direct_get_required_mask(struct device *dev)
{
        u64 end, mask;

        end = memblock_end_of_DRAM() + get_dma_offset(dev);

        mask = 1ULL << (fls64(end) - 1);
        mask += mask - 1;

        return mask;
}
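
/*
 * Worked example, illustrative only: with 4GB of RAM and a zero offset,
 * end = 0x100000000 and fls64(end) = 33, so mask becomes 1ULL << 32 and
 * the final result is 0x1ffffffff: every address bit needed to reach
 * the top of DRAM is set.
 */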

static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction dir,
                                             struct dma_attrs *attrs)
{
        BUG_ON(dir == DMA_NONE);
        __dma_sync_page(page, offset, size, dir);
        return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        /* Nothing to do: a direct mapping needs no teardown */
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
                                          dma_addr_t dma_handle, size_t size,
                                          enum dma_data_direction direction)
{
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

struct dma_map_ops dma_direct_ops = {
        .alloc_coherent                 = dma_direct_alloc_coherent,
        .free_coherent                  = dma_direct_free_coherent,
        .map_sg                         = dma_direct_map_sg,
        .unmap_sg                       = dma_direct_unmap_sg,
        .dma_supported                  = dma_direct_dma_supported,
        .map_page                       = dma_direct_map_page,
        .unmap_page                     = dma_direct_unmap_page,
        .get_required_mask              = dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_for_cpu            = dma_direct_sync_single,
        .sync_single_for_device         = dma_direct_sync_single,
        .sync_sg_for_cpu                = dma_direct_sync_sg,
        .sync_sg_for_device             = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int dma_set_mask(struct device *dev, u64 dma_mask)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (ppc_md.dma_set_mask)
                return ppc_md.dma_set_mask(dev, dma_mask);
        if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
                return dma_ops->set_dma_mask(dev, dma_mask);
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
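
/*
 * Hedged usage sketch, not from this file: a typical driver probe tries
 * a 64-bit mask first and falls back to 32-bit. The pdev name is
 * illustrative.
 */
#if 0	/* example only */
        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
            dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
                return -EIO;
#endif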

u64 dma_get_required_mask(struct device *dev)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (ppc_md.dma_get_required_mask)
                return ppc_md.dma_get_required_mask(dev);

        if (unlikely(dma_ops == NULL))
                return 0;

        if (dma_ops->get_required_mask)
                return dma_ops->get_required_mask(dev);

        return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                      void *cpu_addr, dma_addr_t handle, size_t size)
{
        unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
        pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
        return remap_pfn_range(vma, vma->vm_start,
                               pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_coherent);
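
/*
 * Hedged usage sketch, not from this file: a driver's mmap handler could
 * expose a coherent buffer to userspace via dma_mmap_coherent(). The
 * dev/cpu_addr/handle names refer to an earlier, illustrative
 * dma_alloc_coherent() call.
 */
#if 0	/* example only */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        return dma_mmap_coherent(dev, vma, cpu_addr, handle,
                                 vma->vm_end - vma->vm_start);
}
#endif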