/*
 * linux/drivers/dma/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>

#define NR_DEFAULT_DESC	16
enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/* Sitting on the channel work_list but xfer done by PL330 core */
	DONE,
};
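
/*
 * A descriptor thus cycles FREE -> PREP -> BUSY -> DONE and back to
 * FREE: plucked from the DMAC's desc_pool by prep_xxx(), queued on the
 * channel's work_list at submit time, handed to the PL330 core, and
 * finally spliced back into the pool by free_desc_list().
 */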
struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* Last completed cookie */
	dma_cookie_t completed;

	/* List of to be xfered descriptors */
	struct list_head work_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	dma_addr_t fifo_addr;
};
struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	struct dma_pl330_chan *peripherals; /* keep at end */

	struct clk *clk;
};
struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};
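
/*
 * Each descriptor thus has two faces: 'txd' is what dmaengine clients
 * see, while 'px'/'req'/'rqcfg' form the request actually handed to the
 * PL330 core. _init_desc() wires the two together once, at allocation.
 */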
static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}
static inline void free_desc_list(struct list_head *list)
{
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch;
	unsigned long flags;

	if (list_empty(list))
		return;

	/* Finish off the work list */
	list_for_each_entry(desc, list, node) {
		dma_async_tx_callback callback;
		void *param;

		/* All desc in a list belong to same channel */
		pch = desc->pchan;
		callback = desc->txd.callback;
		param = desc->txd.callback_param;

		if (callback)
			callback(param);

		desc->pchan = NULL;
	}

	pdmac = pch->dmac;

	spin_lock_irqsave(&pdmac->pool_lock, flags);
	list_splice_tail_init(list, &pdmac->desc_pool);
	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			break;

		ret = pl330_submit_req(pch->pl330_chid,
					&desc->req);
		if (!ret) {
			desc->status = BUSY;
			break;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}
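
/*
 * Note: the PL330 core accepts only a couple of outstanding requests
 * per channel thread (hence "not more than two descriptors BUSY" in
 * enum desc_status); pl330_submit_req() returns -EAGAIN when its queue
 * is full, and we simply retry on the next tasklet run.
 */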
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			pch->completed = desc->txd.cookie;
			list_move_tail(&desc->node, &list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	spin_unlock_irqrestore(&pch->lock, flags);

	free_desc_list(&list);
}
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}
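
/*
 * This xfer_cb runs from the PL330 core's interrupt path, so it only
 * marks the descriptor DONE and defers callback invocation and
 * descriptor recycling to pl330_tasklet().
 */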
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	pch->completed = chan->cookie = 1;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return 0;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_irqsave(&pch->lock, flags);

	/* FLUSH the PL330 Channel thread */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

	/* Mark all desc done */
	list_for_each_entry(desc, &pch->work_list, node)
		desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long) pch);

	return 0;
}
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	tasklet_kill(&pch->task);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	spin_unlock_irqrestore(&pch->lock, flags);
}
static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		 struct dma_tx_state *txstate)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	dma_cookie_t last_done, last_used;
	int ret;

	last_done = pch->completed;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_done, last_used);

	dma_set_tx_state(txstate, last_done, last_used, 0);

	return ret;
}
static void pl330_issue_pending(struct dma_chan *chan)
{
	pl330_tasklet((unsigned long) to_pchan(chan));
}
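
/*
 * issue_pending is thus just a kick: the tasklet moves DONE descriptors
 * off the work_list, submits the next PREP ones via fill_queue() and
 * (re)starts the channel thread.
 */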
/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	cookie = tx->chan->cookie;

	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		if (++cookie < 0)
			cookie = 1;
		desc->txd.cookie = cookie;

		list_move_tail(&desc->node, &pch->work_list);
	}

	if (++cookie < 0)
		cookie = 1;
	last->txd.cookie = cookie;

	list_add_tail(&last->node, &pch->work_list);

	tx->chan->cookie = cookie;

	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}
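
/*
 * For example (illustrative): if chan->cookie is 4 and prep_slave_sg()
 * built a three-descriptor chain, the loop above assigns cookies 5 and
 * 6 to the first two nodes, 'last' gets 7, and chan->cookie ends up 7.
 * The "if (++cookie < 0) cookie = 1" guards handle signed wrap-around.
 */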
static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->pchan = NULL;
	desc->req.x = &desc->px;
	desc->req.token = desc;
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.privileged = 0;
	desc->rqcfg.insnaccess = 0;
	desc->rqcfg.scctl = SCCTRL0;
	desc->rqcfg.dcctl = DCCTRL0;
	desc->req.cfg = &desc->rqcfg;
	desc->req.xfer_cb = dma_pl330_rqcb;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}
/* Returns the number of descriptors added to the DMAC pool */
int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;
	int i;

	if (!pdmac)
		return 0;

	desc = kmalloc(count * sizeof(*desc), flg);
	if (!desc)
		return 0;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, &pdmac->desc_pool);
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return count;
}
static struct dma_pl330_desc *
pluck_desc(struct dma_pl330_dmac *pdmac)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	if (!pdmac)
		return NULL;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	if (!list_empty(&pdmac->desc_pool)) {
		desc = list_entry(pdmac->desc_pool.next,
				struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);

	return desc;
}
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_pl330_peri *peri = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(pdmac);

	/* If the DMAC pool is empty, alloc new */
	if (!desc) {
		if (!add_desc(pdmac, GFP_ATOMIC, 1))
			return NULL;

		/* Try again */
		desc = pluck_desc(pdmac);
		if (!desc) {
			dev_err(pch->dmac->pif.dev,
				"%s:%d ALERT!\n", __func__, __LINE__);
			return NULL;
		}
	}

	/* Initialize the descriptor */
	desc->pchan = pch;
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	if (peri) {
		desc->req.rqtype = peri->rqtype;
		desc->req.peri = pch->chan.chan_id;
	} else {
		desc->req.rqtype = MEMTOMEM;
	}

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

	return desc;
}
static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase the MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}
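
/*
 * Rough bound (per the PL330 TRM; stated here as a sanity note, not
 * derived from this driver): microcode loop counters are 8-bit and two
 * loop levels may be nested, so even a modest MC buffer can program
 * loops of up to 256 * 256 burst iterations, far beyond typical
 * request sizes.
 */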
/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_info *pi = &pch->dmac->pif;
	int burst_len;

	burst_len = pi->pcfg.data_bus_width / 8;
	burst_len *= pi->pcfg.data_buf_dep;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > 16)
		burst_len = 16;

	while (burst_len > 1) {
		if (!(len % (burst_len << desc->rqcfg.brst_size)))
			break;
		burst_len--;
	}

	return burst_len;
}
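
/*
 * Worked example (illustrative): with a 64-bit bus (8 bytes), a 16-line
 * data buffer and brst_size = 3 (8-byte beats), burst_len starts at
 * (8 * 16) >> 3 = 16, the hardware maximum; the loop then decrements
 * until burst_len * 8 evenly divides len.
 */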
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct pl330_info *pi;
	int burst;

	if (unlikely(!pch || !len))
		return NULL;

	if (peri && peri->rqtype != MEMTOMEM)
		return NULL;

	pi = &pch->dmac->pif;

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
	if (!desc)
		return NULL;

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;

	/* Select max possible burst size */
	burst = pi->pcfg.data_bus_width / 8;

	while (burst > 1) {
		if (!(len % burst))
			break;
		burst /= 2;
	}

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);

	desc->txd.flags = flags;

	return &desc->txd;
}
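
/*
 * E.g. (illustrative) a 4096-byte memcpy on a 32-bit bus: burst starts
 * at 4 and already divides len, so brst_size becomes 2 (4-byte beats)
 * and get_burst_len() picks the largest burst length that still divides
 * 4096. An odd len would instead fall back to single-byte bursts.
 */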
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flg)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_peri *peri = chan->private;
	struct scatterlist *sg;
	unsigned long flags;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len || !peri))
		return NULL;

	/* Make sure the direction is consistent */
	if ((direction == DMA_TO_DEVICE &&
				peri->rqtype != MEMTODEV) ||
			(direction == DMA_FROM_DEVICE &&
				peri->rqtype != DEVTOMEM)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
				__func__, __LINE__);
		return NULL;
	}

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_TO_DEVICE) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}
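
/*
 * A minimal client-side sketch (hypothetical peripheral driver, names
 * invented for illustration), assuming the platform provided a matching
 * dma_pl330_peri:
 *
 *	dma_cap_mask_t mask;
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_filter_param);
 *
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *					DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
 *	desc->callback = my_xfer_done;
 *	desc->callback_param = my_ctx;
 *	cookie = dmaengine_submit(desc);   (* lands in pl330_tx_submit() *)
 *	dma_async_issue_pending(chan);     (* kicks pl330_tasklet() *)
 */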
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
static int __devinit
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = adev->dev.platform_data;

	/* Allocate a new DMAC and its Channels */
	pdmac = kzalloc(sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	request_mem_region(res->start, resource_size(res), "dma-pl330");

	pi->base = ioremap(res->start, resource_size(res));
	if (!pi->base) {
		ret = -ENXIO;
		goto probe_err1;
	}

	pdmac->clk = clk_get(&adev->dev, "dma");
	if (IS_ERR(pdmac->clk)) {
		dev_err(&adev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err1;
	}

	amba_set_drvdata(adev, pdmac);

#ifdef CONFIG_PM_RUNTIME
	/* to use the runtime PM helper functions */
	pm_runtime_enable(&adev->dev);

	/* enable the power domain */
	if (pm_runtime_get_sync(&adev->dev)) {
		dev_err(&adev->dev, "failed to get runtime pm\n");
		ret = -ENODEV;
		goto probe_err1;
	}
#else
	/* enable dma clk */
	clk_enable(pdmac->clk);
#endif

	irq = adev->irq[0];
	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&adev->dev), pi);
	if (ret)
		goto probe_err2;

	ret = pl330_add(pi);
	if (ret)
		goto probe_err3;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
		goto probe_err4;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (pdat) {
			struct dma_pl330_peri *peri = &pdat->peri[i];

			switch (peri->rqtype) {
			case MEMTOMEM:
				dma_cap_set(DMA_MEMCPY, pd->cap_mask);
				break;
			case MEMTODEV:
			case DEVTOMEM:
				dma_cap_set(DMA_SLAVE, pd->cap_mask);
				break;
			default:
				dev_err(&adev->dev, "DEVTODEV Not Supported\n");
				continue;
			}
			pch->chan.private = peri;
		} else {
			dma_cap_set(DMA_MEMCPY, pd->cap_mask);
			pch->chan.private = NULL;
		}

		INIT_LIST_HEAD(&pch->work_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->chan.chan_id = i;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		pd->chancnt++;
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err4;
	}

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;

probe_err4:
	pl330_del(pi);
probe_err3:
	free_irq(irq, pi);
probe_err2:
	iounmap(pi->base);
probe_err1:
	release_mem_region(res->start, resource_size(res));
	kfree(pdmac);

	return ret;
}
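
/*
 * The probe_err labels above unwind in reverse order of acquisition:
 * pl330_del() before free_irq() before iounmap() before releasing the
 * memory region and freeing the DMAC, so a failure at any stage only
 * undoes what has already succeeded.
 */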
static int __devexit pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct resource *res;
	int irq;

	if (!pdmac)
		return 0;

	amba_set_drvdata(adev, NULL);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;
	pl330_del(pi);

	irq = adev->irq[0];
	free_irq(irq, pi);

	iounmap(pi->base);

	res = &adev->res;
	release_mem_region(res->start, resource_size(res));

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_put(&adev->dev);
	pm_runtime_disable(&adev->dev);
#else
	clk_disable(pdmac->clk);
#endif

	kfree(pdmac);

	return 0;
}
static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};
#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_disable(pdmac->clk);

	return 0;
}

static int pl330_runtime_resume(struct device *dev)
{
	struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev);

	if (!pdmac) {
		dev_err(dev, "failed to get dmac\n");
		return -ENODEV;
	}

	clk_enable(pdmac->clk);

	return 0;
}
#else
#define pl330_runtime_suspend	NULL
#define pl330_runtime_resume	NULL
#endif /* CONFIG_PM_RUNTIME */
static const struct dev_pm_ops pl330_pm_ops = {
	.runtime_suspend = pl330_runtime_suspend,
	.runtime_resume = pl330_runtime_resume,
};
static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm_ops,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};
static int __init pl330_init(void)
{
	return amba_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	amba_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");