/*
 * core routines for the asynchronous memory transfer/transform api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *	Dan Williams <dan.j.williams@intel.com>
 *
 *	with architecture considerations by:
 *	Neil Brown <neilb@suse.de>
 *	Jeff Garzik <jeff@garzik.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/async_tx.h>

#ifdef CONFIG_DMA_ENGINE
static enum dma_state_client
dma_channel_add_remove(struct dma_client *client,
	struct dma_chan *chan, enum dma_state state);

static struct dma_client async_tx_dma = {
	.event_callback = dma_channel_add_remove,
	/* .cap_mask == 0 defaults to all channels */
};

/**
 * async_tx_lock - protect modification of async_tx_master_list and serialize
 *	rebalance operations
 */
static DEFINE_SPINLOCK(async_tx_lock);

static LIST_HEAD(async_tx_master_list);

/* async_tx_issue_pending_all - start all transactions on all channels */
void async_tx_issue_pending_all(void)
{
	struct dma_chan_ref *ref;

	rcu_read_lock();
	list_for_each_entry_rcu(ref, &async_tx_master_list, node)
		ref->chan->device->device_issue_pending(ref->chan);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(async_tx_issue_pending_all);
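
/*
 * Example (illustrative sketch only, not part of this file): a typical
 * caller queues work through the async_* frontends and then flushes it
 * to the hardware with async_tx_issue_pending_all().  The helper below
 * and its parameters are hypothetical.
 */
static void __maybe_unused
example_copy_and_flush(struct page *dest, struct page *src)
{
	/* queue a page-sized copy; ASYNC_TX_ACK lets the descriptor be
	 * recycled as soon as the operation completes
	 */
	async_memcpy(dest, src, 0, 0, PAGE_SIZE, ASYNC_TX_ACK,
		     NULL, NULL, NULL);

	/* start everything queued on every managed channel */
	async_tx_issue_pending_all();
}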

static void
free_dma_chan_ref(struct rcu_head *rcu)
{
	struct dma_chan_ref *ref;
	ref = container_of(rcu, struct dma_chan_ref, rcu);
	kfree(ref);
}

static void
init_dma_chan_ref(struct dma_chan_ref *ref, struct dma_chan *chan)
{
	INIT_LIST_HEAD(&ref->node);
	INIT_RCU_HEAD(&ref->rcu);
	ref->chan = chan;
	atomic_set(&ref->count, 0);
}

static enum dma_state_client
dma_channel_add_remove(struct dma_client *client,
	struct dma_chan *chan, enum dma_state state)
{
	unsigned long found, flags;
	struct dma_chan_ref *master_ref, *ref;
	enum dma_state_client ack = DMA_DUP; /* default: take no action */

	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		found = 0;
		rcu_read_lock();
		list_for_each_entry_rcu(ref, &async_tx_master_list, node)
			if (ref->chan == chan) {
				found = 1;
				break;
			}
		rcu_read_unlock();

		pr_debug("async_tx: dma resource available [%s]\n",
			found ? "old" : "new");

		if (!found)
			ack = DMA_ACK;
		else
			break;

		/* add the channel to the generic management list */
		master_ref = kmalloc(sizeof(*master_ref), GFP_KERNEL);
		if (master_ref) {
			init_dma_chan_ref(master_ref, chan);
			spin_lock_irqsave(&async_tx_lock, flags);
			list_add_tail_rcu(&master_ref->node,
				&async_tx_master_list);
			spin_unlock_irqrestore(&async_tx_lock,
				flags);
		} else {
			printk(KERN_WARNING "async_tx: unable to create"
				" new master entry in response to"
				" a DMA_RESOURCE_ADDED event"
				" (-ENOMEM)\n");
			return 0;
		}
		break;
	case DMA_RESOURCE_REMOVED:
		found = 0;
		spin_lock_irqsave(&async_tx_lock, flags);
		list_for_each_entry(ref, &async_tx_master_list, node)
			if (ref->chan == chan) {
				list_del_rcu(&ref->node);
				call_rcu(&ref->rcu, free_dma_chan_ref);
				found = 1;
				break;
			}
		spin_unlock_irqrestore(&async_tx_lock, flags);

		pr_debug("async_tx: dma resource removed [%s]\n",
			found ? "ours" : "not ours");

		if (found)
			ack = DMA_ACK;
		break;
	case DMA_RESOURCE_SUSPEND:
	case DMA_RESOURCE_RESUME:
		printk(KERN_WARNING "async_tx: does not support dma channel"
			" suspend/resume\n");
		break;
	default:
		BUG();
	}

	return ack;
}

static int __init async_tx_init(void)
{
	dma_async_client_register(&async_tx_dma);
	dma_async_client_chan_request(&async_tx_dma);

	printk(KERN_INFO "async_tx: api initialized (async)\n");

	return 0;
}

static void __exit async_tx_exit(void)
{
	dma_async_client_unregister(&async_tx_dma);
}

/**
 * __async_tx_find_channel - find a channel to carry out the operation or let
 *	the transaction execute synchronously
 * @depend_tx: transaction dependency
 * @tx_type: transaction type
 */
struct dma_chan *
__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
	enum dma_transaction_type tx_type)
{
	/* see if we can keep the chain on one channel */
	if (depend_tx &&
	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
		return depend_tx->chan;
	return dma_find_channel(tx_type);
}
EXPORT_SYMBOL_GPL(__async_tx_find_channel);
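
/*
 * Illustrative sketch (hypothetical helper, not part of the api): roughly
 * how an async_* frontend uses the channel returned above.  A NULL result
 * means "no capable dma channel, run the operation synchronously".  The
 * dma addresses are assumed to be already mapped by the caller.
 */
static __maybe_unused struct dma_async_tx_descriptor *
example_prep_copy(struct dma_async_tx_descriptor *depend_tx,
	dma_addr_t dma_dest, dma_addr_t dma_src, size_t len)
{
	struct dma_chan *chan = __async_tx_find_channel(depend_tx, DMA_MEMCPY);

	if (!chan)
		return NULL; /* caller falls back to a cpu copy */

	return chan->device->device_prep_dma_memcpy(chan, dma_dest,
						    dma_src, len, 0);
}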

#else
static int __init async_tx_init(void)
{
	printk(KERN_INFO "async_tx: api initialized (sync-only)\n");
	return 0;
}

static void __exit async_tx_exit(void)
{
	do { } while (0);
}
#endif

/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 *	pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx */
	spin_lock_bh(&depend_tx->lock);
	if (depend_tx->parent && depend_tx->chan == tx->chan) {
		tx->parent = depend_tx;
		depend_tx->next = tx;
		intr_tx = NULL;
	}
	spin_unlock_bh(&depend_tx->lock);

	if (!intr_tx)
		return;

	chan = depend_tx->chan;
	device = chan->device;

	/* see if we can schedule an interrupt
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		tx->parent = intr_tx;
		/* safe to set ->next outside the lock since we know we are
		 * not submitted yet
		 */
		intr_tx->next = tx;

		/* check if we need to append */
		spin_lock_bh(&depend_tx->lock);
		if (depend_tx->parent) {
			intr_tx->parent = depend_tx;
			depend_tx->next = intr_tx;
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		spin_unlock_bh(&depend_tx->lock);

		if (intr_tx) {
			intr_tx->parent = NULL;
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
	} else {
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}

/**
 * submit_disposition - while holding depend_tx->lock we must avoid submitting
 *	new operations to prevent a circular locking dependency with
 *	drivers that already hold a channel lock when calling
 *	async_tx_run_dependencies.
 * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
 * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
 * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
 */
enum submit_disposition {
	ASYNC_TX_SUBMITTED,
	ASYNC_TX_CHANNEL_SWITCH,
	ASYNC_TX_DIRECT_SUBMIT,
};

void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	tx->callback = cb_fn;
	tx->callback_param = cb_param;

	if (depend_tx) {
		enum submit_disposition s;

		/* sanity check the dependency chain:
		 * 1/ if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 * 2/ dependencies are 1:1 i.e. two transactions can
		 * not depend on the same parent
		 */
		BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next ||
		       tx->parent);

		/* the lock prevents async_tx_run_dependencies from missing
		 * the setting of ->next when ->parent != NULL
		 */
		spin_lock_bh(&depend_tx->lock);
		if (depend_tx->parent) {
			/* we have a parent so we can not submit directly
			 * if we are staying on the same channel: append
			 * else: channel switch
			 */
			if (depend_tx->chan == chan) {
				tx->parent = depend_tx;
				depend_tx->next = tx;
				s = ASYNC_TX_SUBMITTED;
			} else
				s = ASYNC_TX_CHANNEL_SWITCH;
		} else {
			/* we do not have a parent so we may be able to submit
			 * directly if we are staying on the same channel
			 */
			if (depend_tx->chan == chan)
				s = ASYNC_TX_DIRECT_SUBMIT;
			else
				s = ASYNC_TX_CHANNEL_SWITCH;
		}
		spin_unlock_bh(&depend_tx->lock);

		switch (s) {
		case ASYNC_TX_SUBMITTED:
			break;
		case ASYNC_TX_CHANNEL_SWITCH:
			async_tx_channel_switch(depend_tx, tx);
			break;
		case ASYNC_TX_DIRECT_SUBMIT:
			tx->parent = NULL;
			tx->tx_submit(tx);
			break;
		}
	} else {
		tx->parent = NULL;
		tx->tx_submit(tx);
	}

	if (flags & ASYNC_TX_ACK)
		async_tx_ack(tx);

	if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
		async_tx_ack(depend_tx);
}
EXPORT_SYMBOL_GPL(async_tx_submit);
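
/*
 * Illustrative sketch (hypothetical, not part of this file): a frontend
 * pairs a device_prep_* call with async_tx_submit() to attach the callback
 * and the dependency.  'tx' is assumed to be a descriptor obtained from a
 * prep call such as the example_prep_copy() sketch above.
 */
static void __maybe_unused
example_submit_copy(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	/* the core appends to depend_tx, schedules a channel switch, or
	 * submits directly, as decided under depend_tx->lock above
	 */
	async_tx_submit(chan, tx, ASYNC_TX_ACK | ASYNC_TX_DEP_ACK,
			depend_tx, cb_fn, cb_param);
}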

/**
 * async_trigger_callback - schedules the callback function to be run after
 *	any dependent operations have been completed.
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: 'callback' requires the completion of this transaction
 * @cb_fn: function to call after depend_tx completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_trigger_callback(enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *tx;

	if (depend_tx) {
		chan = depend_tx->chan;
		device = chan->device;

		/* see if we can schedule an interrupt
		 * otherwise poll for completion
		 */
		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
			device = NULL;

		tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
	} else
		tx = NULL;

	if (tx) {
		pr_debug("%s: (async)\n", __func__);

		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		pr_debug("%s: (sync)\n", __func__);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&depend_tx);

		async_tx_sync_epilog(cb_fn, cb_param);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_trigger_callback);
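
/*
 * Example (illustrative sketch, not part of this file): run 'done' only
 * after a previously queued operation completes.  'tx' is assumed to be
 * the descriptor returned by an earlier async_* call.
 */
static void __maybe_unused
example_notify_when_done(struct dma_async_tx_descriptor *tx,
	dma_async_tx_callback done, void *done_param)
{
	/* ack both the new descriptor and the dependency once queued */
	async_trigger_callback(ASYNC_TX_ACK | ASYNC_TX_DEP_ACK, tx,
			       done, done_param);
}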

/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx - transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
	if (*tx) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(async_tx_test_ack(*tx));
		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
			panic("DMA_ERROR waiting for transaction\n");
		async_tx_ack(*tx);
		*tx = NULL;
	}
}
EXPORT_SYMBOL_GPL(async_tx_quiesce);
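
/*
 * Example (illustrative sketch, not part of this file): a synchronous
 * fallback path quiesces its dependency before touching the buffers on
 * the cpu.  'depend_tx' is assumed to come from an earlier async_* call;
 * async_tx_quiesce() NULLs it once it is complete and acked.
 */
static void __maybe_unused
example_sync_fallback(struct dma_async_tx_descriptor *depend_tx,
	void *dest, void *src, size_t len)
{
	/* block until the prerequisite dma operation has finished */
	async_tx_quiesce(&depend_tx);

	/* now the cpu may safely touch the data */
	memcpy(dest, src, len);
}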

module_init(async_tx_init);
module_exit(async_tx_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API");
MODULE_LICENSE("GPL");