2 * driver/dma/coh901318_lli.c
4 * Copyright (C) 2007-2009 ST-Ericsson
5 * License terms: GNU General Public License (GPL) version 2
6 * Support functions for handling lli for dma
7 * Author: Per Friden <per.friden@stericsson.com>
10 #include <linux/dma-mapping.h>
11 #include <linux/spinlock.h>
12 #include <linux/dmapool.h>
13 #include <linux/memory.h>
14 #include <mach/coh901318.h>
16 #include "coh901318_lli.h"
/*
 * Debugfs bookkeeping for the lli pool: tracks how many lli entries are
 * currently allocated from a pool. Active only when both CONFIG_DEBUG_FS
 * and CONFIG_U300_DEBUG are enabled; otherwise the macros expand to
 * nothing so there is no runtime cost.
 * NOTE(review): the #else/#endif lines are not visible in this chunk —
 * confirm against the full file.
 */
18 #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
19 #define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
20 #define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
22 #define DEBUGFS_POOL_COUNTER_RESET(pool)
23 #define DEBUGFS_POOL_COUNTER_ADD(pool, add)
/*
 * coh901318_lli_next() - step to the next lli entry in a linked chain.
 * @data: current lli entry; may be NULL
 *
 * A hardware link address of 0 marks the end of the chain. For a NULL
 * entry or an end-of-chain entry the chain stops; otherwise the virtual
 * address of the linked entry is returned.
 * NOTE(review): the early-return arm of the if is not visible in this
 * chunk — presumably "return NULL"; confirm against the full file.
 */
26 static struct coh901318_lli *
27 coh901318_lli_next(struct coh901318_lli *data)
29 	if (data == NULL || data->link_addr == 0)
32 	return (struct coh901318_lli *) data->virt_link_addr;
/*
 * coh901318_pool_create() - initialize an lli pool.
 * @pool:  pool descriptor to set up
 * @size:  size of each lli element handed to dma_pool_create()
 * @align: required alignment of each element
 *
 * Initializes the pool spinlock, creates the backing coherent DMA pool
 * ("lli_pool") and resets the debugfs allocation counter.
 * NOTE(review): the @dev parameter, the dma_pool_create() failure check
 * and the return statement are not visible in this chunk — confirm the
 * error path against the full file.
 */
35 int coh901318_pool_create(struct coh901318_pool *pool,
37 			  size_t size, size_t align)
39 	spin_lock_init(&pool->lock);
41 	pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
43 	DEBUGFS_POOL_COUNTER_RESET(pool);
/*
 * coh901318_pool_destroy() - tear down an lli pool.
 * @pool: pool previously set up by coh901318_pool_create()
 *
 * Releases the backing DMA pool. All lli entries must already have been
 * freed back to the pool before calling this.
 * NOTE(review): the return statement is not visible in this chunk.
 */
47 int coh901318_pool_destroy(struct coh901318_pool *pool)
50 	dma_pool_destroy(pool->dmapool);
/*
 * coh901318_lli_alloc() - allocate a linked chain of lli entries.
 * @pool: pool to allocate from
 * @len:  number of lli entries wanted in the chain
 *
 * Allocates @len entries from the DMA pool under the pool spinlock and
 * links them: each entry's link_addr gets the physical address of its
 * successor and virt_link_addr the virtual address; the final entry's
 * link is left 0 to terminate the chain. Returns the head of the chain.
 * If an allocation fails partway through, the partial chain is
 * terminated and freed via coh901318_lli_free() before returning.
 * NOTE(review): the NULL checks after each dma_pool_alloc(), the goto
 * labels, the phy_this assignments and the return statements are not
 * visible in this chunk — error-path details below are inferred and
 * should be confirmed against the full file.
 */
54 struct coh901318_lli *
55 coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
58 	struct coh901318_lli *head;
59 	struct coh901318_lli *lli;
60 	struct coh901318_lli *lli_prev;
66 	spin_lock(&pool->lock);
	/* First entry of the chain doubles as the returned head. */
68 	head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
73 	DEBUGFS_POOL_COUNTER_ADD(pool, 1);
	/* Start with a terminated single-entry chain. */
77 	lli->link_addr = 0x00000000;
78 	lli->virt_link_addr = 0x00000000U;
80 	for (i = 1; i < len; i++) {
83 		lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
88 		DEBUGFS_POOL_COUNTER_ADD(pool, 1);
		/* New entry terminates the chain until the next iteration. */
90 		lli->link_addr = 0x00000000;
91 		lli->virt_link_addr = 0x00000000U;
		/* Hook the new entry onto its predecessor (phys + virt). */
93 		lli_prev->link_addr = phy;
94 		lli_prev->virt_link_addr = lli;
97 	spin_unlock(&pool->lock);
	/* Error path: head allocation failed. */
102 	spin_unlock(&pool->lock);
	/* Error path: mid-chain allocation failed — terminate and free
	 * what was built so far. */
106 	lli_prev->link_addr = 0x00000000U;
107 	spin_unlock(&pool->lock);
108 	coh901318_lli_free(pool, &head);
/*
 * coh901318_lli_free() - free a whole lli chain back to its pool.
 * @pool: pool the chain was allocated from
 * @lli:  address of the chain-head pointer
 *
 * Walks the chain under the pool spinlock, returning every entry to the
 * DMA pool using its stored physical address (phy_this); link_addr == 0
 * marks the final entry, which is freed after the loop. The debugfs
 * counter is decremented per entry.
 * NOTE(review): the NULL guards on @lli/*@lli, the "l = next" loop
 * advance and the "*lli = NULL" reset are not visible in this chunk —
 * confirm against the full file.
 */
112 void coh901318_lli_free(struct coh901318_pool *pool,
113 			struct coh901318_lli **lli)
115 	struct coh901318_lli *l;
116 	struct coh901318_lli *next;
126 	spin_lock(&pool->lock);
	/* Free every linked entry; a zero link marks the last one. */
128 	while (l->link_addr) {
129 		next = l->virt_link_addr;
130 		dma_pool_free(pool->dmapool, l, l->phy_this);
131 		DEBUGFS_POOL_COUNTER_ADD(pool, -1);
	/* Free the terminating entry itself. */
134 	dma_pool_free(pool->dmapool, l, l->phy_this);
135 	DEBUGFS_POOL_COUNTER_ADD(pool, -1);
137 	spin_unlock(&pool->lock);
/*
 * coh901318_lli_fill_memcpy() - program an lli chain for a mem-to-mem copy.
 * @pool:         pool the chain belongs to
 * @lli:          head of a pre-allocated chain (see coh901318_lli_alloc())
 * @source:       physical source address
 * @size:         total number of bytes to copy
 * @destination:  physical destination address
 * @ctrl_chained: control word for all but the last descriptor
 *
 * Splits the transfer into MAX_DMA_PACKET_SIZE chunks, one per chained
 * lli entry, advancing src/dst by the packet size each step. The final
 * (unlinked) entry carries the remaining byte count with the end-of-
 * message control word.
 * NOTE(review): the ctrl_eom parameter, the per-entry src/dst stores
 * and the return statement are not visible in this chunk — confirm
 * against the full file.
 */
142 coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
143 			  struct coh901318_lli *lli,
144 			  dma_addr_t source, unsigned int size,
145 			  dma_addr_t destination, u32 ctrl_chained,
149 	dma_addr_t src = source;
150 	dma_addr_t dst = destination;
	/* One full-size packet per chained descriptor. */
155 	while (lli->link_addr) {
156 		lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
160 		s -= MAX_DMA_PACKET_SIZE;
161 		lli = coh901318_lli_next(lli);
163 		src += MAX_DMA_PACKET_SIZE;
164 		dst += MAX_DMA_PACKET_SIZE;
	/* Last descriptor: remaining bytes plus end-of-message control. */
167 	lli->control = ctrl_eom | s;
/*
 * coh901318_lli_fill_single() - program an lli chain for a single-buffer
 * device transfer.
 * @pool:         pool the chain belongs to
 * @lli:          head of a pre-allocated chain
 * @buf:          physical address of the memory buffer
 * @size:         total number of bytes to transfer
 * @dev_addr:     physical address of the device data register
 * @ctrl_chained: control word for all but the last descriptor
 * @ctrl_eom:     control word for the final (end-of-message) descriptor
 * @dir:          DMA_TO_DEVICE (buf is source) or DMA_FROM_DEVICE
 *                (buf is destination)
 *
 * Splits the buffer into MAX_DMA_PACKET_SIZE chunks across the chain.
 * The next-to-last chunk is halved when the remainder would otherwise
 * leave a very small final transfer (see comment below).
 * NOTE(review): the src/dst setup in the direction branches, the
 * per-entry address stores/advances and the return statement are not
 * visible in this chunk — confirm against the full file. Also note
 * block_size is computed but the control word on this line still uses
 * MAX_DMA_PACKET_SIZE; the line applying block_size is not visible.
 */
175 coh901318_lli_fill_single(struct coh901318_pool *pool,
176 			  struct coh901318_lli *lli,
177 			  dma_addr_t buf, unsigned int size,
178 			  dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
179 			  enum dma_data_direction dir)
	/* Direction decides which side is the fixed device address. */
186 	if (dir == DMA_TO_DEVICE) {
190 	} else if (dir == DMA_FROM_DEVICE) {
198 	while (lli->link_addr) {
199 		size_t block_size = MAX_DMA_PACKET_SIZE;
200 		lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
202 		/* If we are on the next-to-final block and there will
203 		 * be less than half a DMA packet left for the last
204 		 * block, then we want to make this block a little
205 		 * smaller to balance the sizes. This is meant to
206 		 * avoid too small transfers if the buffer size is
207 		 * (MAX_DMA_PACKET_SIZE*N + 1) */
208 		if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
209 			block_size = MAX_DMA_PACKET_SIZE/2;
215 		lli = coh901318_lli_next(lli);
		/* Only the memory-side address advances between packets. */
217 		if (dir == DMA_TO_DEVICE)
219 		else if (dir == DMA_FROM_DEVICE)
	/* Last descriptor: remaining bytes plus end-of-message control. */
223 	lli->control = ctrl_eom | s;
/*
 * coh901318_lli_fill_sg() - program an lli chain from a scatterlist.
 * @pool:          pool the chain belongs to (its lock is held while
 *                 filling)
 * @lli:           head of a pre-allocated chain
 * @sgl:           scatterlist describing the memory side
 * @nents:         number of entries in @sgl
 * @dev_addr:      physical address of the device data register
 * @ctrl_chained:  control word for non-final descriptors
 * @ctrl:          control word for the final descriptor; if zero,
 *                 ctrl_last is used instead (see below)
 * @dir:           DMA_TO_DEVICE or DMA_FROM_DEVICE
 * @ctrl_irq_mask: mask applied to suppress per-descriptor interrupts
 *                 (exact use not visible in this chunk)
 *
 * Walks each sg element, splitting it into MAX_DMA_PACKET_SIZE pieces,
 * one per lli descriptor. Chained sg elements keep the chained control
 * word so end-of-message is only signalled on the true last element.
 * NOTE(review): several statements are missing from this chunk — the
 * src/dst setup in the direction branches, ctrl_last, the per-piece
 * address stores, the final-else of the i == nents - 1 test, the "val"
 * selection, and both return statements. Confirm all of these against
 * the full file before relying on this description.
 */
231 coh901318_lli_fill_sg(struct coh901318_pool *pool,
232 		      struct coh901318_lli *lli,
233 		      struct scatterlist *sgl, unsigned int nents,
234 		      dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
236 		      enum dma_data_direction dir, u32 ctrl_irq_mask)
239 	struct scatterlist *sg;
243 	u32 bytes_to_transfer;
249 	spin_lock(&pool->lock);
	/* Direction decides which side is the fixed device address. */
251 	if (dir == DMA_TO_DEVICE)
253 	else if (dir == DMA_FROM_DEVICE)
258 	for_each_sg(sgl, sg, nents, i) {
259 		if (sg_is_chain(sg)) {
260 			/* sg continues to the next sg-element don't
261 			 * send ctrl_finish until the last
262 			 * sg-element in the chain
			 */
264 			ctrl_sg = ctrl_chained;
265 		} else if (i == nents - 1)
			/* Last element: caller's ctrl, or the default
			 * last-descriptor control word. */
268 			ctrl_sg = ctrl ? ctrl : ctrl_last;
271 		if (dir == DMA_TO_DEVICE)
272 			/* increment source address */
275 			/* increment destination address */
278 		bytes_to_transfer = sg_dma_len(sg);
280 		while (bytes_to_transfer) {
			/* Clamp each descriptor to the hardware packet
			 * size limit. */
283 			if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
284 				elem_size = MAX_DMA_PACKET_SIZE;
287 				elem_size = bytes_to_transfer;
291 			lli->control = val | elem_size;
295 			if (dir == DMA_FROM_DEVICE)
			/* Hardware requires a word-aligned link address. */
300 			BUG_ON(lli->link_addr & 3);
302 			bytes_to_transfer -= elem_size;
303 			lli = coh901318_lli_next(lli);
307 	spin_unlock(&pool->lock);
	/* Error path. */
311 	spin_unlock(&pool->lock);