/*
 * driver/dma/coh901318_lli.c
 *
 * Copyright (C) 2007-2009 ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 * Support functions for handling lli for dma
 * Author: Per Friden <per.friden@stericsson.com>
 */
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/dmapool.h>
#include <linux/memory.h>
#include <mach/coh901318.h>

#include "coh901318_lli.h"
#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
#else
#define DEBUGFS_POOL_COUNTER_RESET(pool)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
#endif
/*
 * Return the virtual address of the next lli in the chain, or NULL if
 * this lli is the last one (its physical link address is zero).
 */
static struct coh901318_lli *
coh901318_lli_next(struct coh901318_lli *data)
{
	if (data == NULL || data->link_addr == 0)
		return NULL;

	return (struct coh901318_lli *) data->virt_link_addr;
}
int coh901318_pool_create(struct coh901318_pool *pool,
			  struct device *dev,
			  size_t size, size_t align)
{
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
	if (pool->dmapool == NULL)
		return -ENOMEM;

	DEBUGFS_POOL_COUNTER_RESET(pool);
	return 0;
}
int coh901318_pool_destroy(struct coh901318_pool *pool)
{
	dma_pool_destroy(pool->dmapool);
	return 0;
}
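
/*
 * Illustrative pairing of the pool calls above (not part of this
 * file); the device pointer, lli size and alignment are whatever the
 * platform setup code actually uses:
 *
 *	struct coh901318_pool pool;
 *
 *	if (coh901318_pool_create(&pool, dev,
 *				  sizeof(struct coh901318_lli), 32))
 *		return -ENOMEM;
 *	...
 *	coh901318_pool_destroy(&pool);
 */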
struct coh901318_lli *
coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
{
	int i;
	struct coh901318_lli *head;
	struct coh901318_lli *lli;
	struct coh901318_lli *lli_prev;
	dma_addr_t phy;

	/* Return before taking the lock; nothing to clean up yet */
	if (len == 0)
		return NULL;

	spin_lock(&pool->lock);

	head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);

	if (head == NULL)
		goto err;

	DEBUGFS_POOL_COUNTER_ADD(pool, 1);

	lli = head;
	lli->phy_this = phy;

	for (i = 1; i < len; i++) {
		lli_prev = lli;

		lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);

		if (lli == NULL)
			goto err_clean_up;

		DEBUGFS_POOL_COUNTER_ADD(pool, 1);
		lli->phy_this = phy;

		lli_prev->link_addr = phy;
		lli_prev->virt_link_addr = lli;
	}

	/* Terminate the chain: a zero link address marks the last lli */
	lli->link_addr = 0x00000000U;

	spin_unlock(&pool->lock);

	return head;

 err:
	spin_unlock(&pool->lock);
	return NULL;

 err_clean_up:
	/* Terminate what was built so far, then free it */
	lli_prev->link_addr = 0x00000000U;
	spin_unlock(&pool->lock);
	coh901318_lli_free(pool, &head);
	return NULL;
}
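
/*
 * Illustrative allocation/free pairing (not part of this file): a
 * client needing a three-descriptor chain would do roughly:
 *
 *	struct coh901318_lli *head;
 *
 *	head = coh901318_lli_alloc(pool, 3);
 *	if (head == NULL)
 *		return -ENOMEM;
 *	... fill the chain with one of the fill functions below,
 *	... hand it to the channel, and on completion:
 *	coh901318_lli_free(pool, &head);
 */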
void coh901318_lli_free(struct coh901318_pool *pool,
			struct coh901318_lli **lli)
{
	struct coh901318_lli *l;
	struct coh901318_lli *next;

	if (lli == NULL)
		return;

	l = *lli;

	if (l == NULL)
		return;

	spin_lock(&pool->lock);

	/* Walk the chain via the virtual links, freeing each element */
	while (l->link_addr) {
		next = l->virt_link_addr;
		dma_pool_free(pool->dmapool, l, l->phy_this);
		DEBUGFS_POOL_COUNTER_ADD(pool, -1);
		l = next;
	}
	dma_pool_free(pool->dmapool, l, l->phy_this);
	DEBUGFS_POOL_COUNTER_ADD(pool, -1);

	spin_unlock(&pool->lock);
	*lli = NULL;
}
int
coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
			  struct coh901318_lli *lli,
			  dma_addr_t source, unsigned int size,
			  dma_addr_t destination, u32 ctrl_chained,
			  u32 ctrl_eom)
{
	int s = size;
	dma_addr_t src = source;
	dma_addr_t dst = destination;

	/* All llis but the last carry a full packet and the chained
	 * control word; the last carries the remainder and ctrl_eom. */
	while (lli->link_addr) {
		lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
		lli->src_addr = src;
		lli->dst_addr = dst;

		s -= MAX_DMA_PACKET_SIZE;
		lli = coh901318_lli_next(lli);

		src += MAX_DMA_PACKET_SIZE;
		dst += MAX_DMA_PACKET_SIZE;
	}

	lli->control = ctrl_eom | s;
	lli->src_addr = src;
	lli->dst_addr = dst;

	/* One irq per single transfer */
	return 1;
}
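
/*
 * Worked example for the memcpy fill above (assuming, for
 * illustration, MAX_DMA_PACKET_SIZE is 2047 on this hardware family):
 * a 5000 byte copy over a chain of three llis splits into
 * 2047 + 2047 + 906 bytes, and only the last descriptor carries
 * ctrl_eom, so the whole transfer raises a single irq.
 */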
int
coh901318_lli_fill_single(struct coh901318_pool *pool,
			  struct coh901318_lli *lli,
			  dma_addr_t buf, unsigned int size,
			  dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
			  enum dma_data_direction dir)
{
	int s = size;
	dma_addr_t src;
	dma_addr_t dst;

	if (dir == DMA_TO_DEVICE) {
		src = buf;
		dst = dev_addr;
	} else if (dir == DMA_FROM_DEVICE) {
		src = dev_addr;
		dst = buf;
	} else {
		return -EINVAL;
	}

	while (lli->link_addr) {
		size_t block_size = MAX_DMA_PACKET_SIZE;

		/* If we are on the next-to-final block and there will
		 * be less than half a DMA packet left for the last
		 * block, then we want to make this block a little
		 * smaller to balance the sizes. This is meant to
		 * avoid too small transfers if the buffer size is
		 * (MAX_DMA_PACKET_SIZE*N + 1) */
		if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
			block_size = MAX_DMA_PACKET_SIZE/2;
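
		/* Worked example of the balancing above: with
		 * size = MAX_DMA_PACKET_SIZE + 1 spread over two llis,
		 * a naive split would be MAX_DMA_PACKET_SIZE followed
		 * by a single byte; halving the next-to-last block
		 * instead yields two blocks of roughly
		 * MAX_DMA_PACKET_SIZE/2 each. */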
		/* Program the control word only now that the final
		 * block size is known, so the packet length matches
		 * the address increment below. */
		lli->control = ctrl_chained | block_size;

		s -= block_size;
		lli->src_addr = src;
		lli->dst_addr = dst;

		lli = coh901318_lli_next(lli);

		if (dir == DMA_TO_DEVICE)
			src += block_size;
		else if (dir == DMA_FROM_DEVICE)
			dst += block_size;
	}

	lli->control = ctrl_eom | s;
	lli->src_addr = src;
	lli->dst_addr = dst;

	/* One irq per single transfer */
	return 1;
}
int
coh901318_lli_fill_sg(struct coh901318_pool *pool,
		      struct coh901318_lli *lli,
		      struct scatterlist *sgl, unsigned int nents,
		      dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
		      u32 ctrl_last,
		      enum dma_data_direction dir, u32 ctrl_irq_mask)
{
	int i;
	struct scatterlist *sg;
	u32 ctrl_sg;
	dma_addr_t src = 0;
	dma_addr_t dst = 0;
	u32 nbr_of_irq = 0;
	u32 bytes_to_transfer;
	u32 elem_size;

	/* Return before taking the lock; nothing to unlock yet */
	if (lli == NULL)
		return -EINVAL;

	spin_lock(&pool->lock);

	if (dir == DMA_TO_DEVICE)
		dst = dev_addr;
	else if (dir == DMA_FROM_DEVICE)
		src = dev_addr;
	else
		goto err;

	for_each_sg(sgl, sg, nents, i) {
		if (sg_is_chain(sg)) {
			/* sg continues to the next sg-element, don't
			 * send ctrl_finish until the last
			 * sg-element in the chain
			 */
			ctrl_sg = ctrl_chained;
		} else if (i == nents - 1)
			ctrl_sg = ctrl_last;
		else
			ctrl_sg = ctrl ? ctrl : ctrl_last;

		if ((ctrl_sg & ctrl_irq_mask))
			nbr_of_irq++;

		if (dir == DMA_TO_DEVICE)
			/* pick the source address from this sg element */
			src = sg_dma_address(sg);
		else
			/* pick the destination address from this sg element */
			dst = sg_dma_address(sg);

		bytes_to_transfer = sg_dma_len(sg);

		/* Split each sg element into MAX_DMA_PACKET_SIZE chunks */
		while (bytes_to_transfer) {
			u32 val;

			if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
				elem_size = MAX_DMA_PACKET_SIZE;
				val = ctrl_chained;
			} else {
				elem_size = bytes_to_transfer;
				val = ctrl_sg;
			}

			lli->control = val | elem_size;
			lli->src_addr = src;
			lli->dst_addr = dst;

			if (dir == DMA_FROM_DEVICE)
				dst += elem_size;
			else
				src += elem_size;

			BUG_ON(lli->link_addr & 3);

			bytes_to_transfer -= elem_size;
			lli = coh901318_lli_next(lli);
		}
	}
	spin_unlock(&pool->lock);

	/* There can be many IRQs per sg transfer */
	return nbr_of_irq;

 err:
	spin_unlock(&pool->lock);
	return -EINVAL;
}
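
/*
 * Illustrative irq accounting for the sg fill above: if ctrl_last has
 * its interrupt-enable bit (as selected by ctrl_irq_mask) set but
 * ctrl does not, an unchained three-element sg list yields
 * nbr_of_irq == 1; if ctrl carries the bit as well, it yields 3.
 */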