/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <plat/mv_xor.h>
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, common)

#define to_mv_xor_device(dev)		\
	container_of(dev, struct mv_xor_device, common)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_dest_addr;
}

static u32 mv_desc_get_src_addr(struct mv_xor_desc_slot *desc,
				int src_idx)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	return hw_desc->phy_src_addr[src_idx];
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_block_fill_val(struct mv_xor_desc_slot *desc, u32 val)
{
	desc->value = val;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

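/*
 * For reference, a minimal sketch of the hardware descriptor that the
 * accessors above poke at. This is inferred from the accessors and the
 * companion mv_xor.h; consult that header and the XOR engine datasheet for
 * the authoritative layout:
 *
 *	struct mv_xor_desc {
 *		u32 status;		// bit 31: descriptor enabled/owned
 *		u32 crc32_result;	// result of CRC-32 calculation
 *		u32 desc_command;	// operation and per-source enable bits
 *		u32 phy_next_desc;	// physical address of next descriptor
 *		u32 byte_count;		// size of src/dest blocks in bytes
 *		u32 phy_dest_addr;	// destination block address
 *		u32 phy_src_addr[8];	// source block addresses
 *		u32 reserved0;
 *		u32 reserved1;
 *	};
 */
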
static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[index] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return __raw_readl(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_set_dest_pointer(struct mv_xor_chan *chan, u32 desc_addr)
{
	__raw_writel(desc_addr, XOR_DEST_POINTER(chan));
}

static void mv_chan_set_block_size(struct mv_xor_chan *chan, u32 block_size)
{
	__raw_writel(block_size, XOR_BLOCK_SIZE(chan));
}

static void mv_chan_set_value(struct mv_xor_chan *chan, u32 value)
{
	__raw_writel(value, XOR_INIT_VALUE_LOW(chan));
	__raw_writel(value, XOR_INIT_VALUE_HIGH(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = __raw_readl(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	__raw_writel(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	__raw_writel(val, XOR_INTR_CAUSE(chan));
}

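/*
 * The shared cause/mask registers pack one 16-bit field per channel, which
 * is why the helpers above shift by (chan->idx * 16). A worked example with
 * two channels behind one register pair:
 *
 *	chan->idx == 0: bits  0..15 hold its cause/mask bits;
 *	                clear-EOC writes  ~(1 << 0)         == 0xFFFFFFFE
 *	chan->idx == 1: bits 16..31 hold its cause/mask bits;
 *	                clear-EOC writes  ~(1 << 16)        == 0xFFFEFFFF
 *	                clear-errors writes 0xFFFF0000 >> 16 == 0x0000FFFF
 *
 * Judging from these masks, the cause register appears to be
 * write-zero-to-clear: a pattern with zero bits acknowledges exactly those
 * events.
 */
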
static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;
	if (desc->type == DMA_MEMSET)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = __raw_readl(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	case DMA_MEMSET:
		op_mode = XOR_OPERATION_MODE_MEMSET;
		break;
	default:
		dev_printk(KERN_ERR, chan->device->common.dev,
			   "error: unsupported operation %d.\n",
			   type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;
	__raw_writel(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(chan->device->common.dev, " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = __raw_readl(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

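/*
 * Bits [5:4] of the activation register appear to encode the channel state;
 * a field value of 1 is treated as "busy/active" here. This reading is
 * inferred from the code above rather than confirmed against the datasheet.
 */
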
static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed
 * by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	if (sw_desc->type == DMA_MEMSET) {
		/* for memset requests we need to program the engine, no
		 * descriptors are used.
		 */
		struct mv_xor_desc *hw_desc = sw_desc->hw_desc;
		mv_chan_set_dest_pointer(mv_chan, hw_desc->phy_dest_addr);
		mv_chan_set_block_size(mv_chan, sw_desc->unmap_len);
		mv_chan_set_value(mv_chan, sw_desc->value);
	} else {
		/* set the hardware chain */
		mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);
	}
	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->common);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		/* unmap dma addresses
		 * (unmap_single vs unmap_page?)
		 */
		if (desc->group_head && desc->unmap_len) {
			struct mv_xor_desc_slot *unmap = desc->group_head;
			struct device *dev =
				&mv_chan->device->pdev->dev;
			u32 len = unmap->unmap_len;
			enum dma_ctrl_flags flags = desc->async_tx.flags;
			u32 src_cnt;
			dma_addr_t addr;
			dma_addr_t dest;

			src_cnt = unmap->unmap_src_cnt;
			dest = mv_desc_get_dest_addr(unmap);
			if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
				enum dma_data_direction dir;

				if (src_cnt > 1) /* is xor ? */
					dir = DMA_BIDIRECTIONAL;
				else
					dir = DMA_FROM_DEVICE;
				dma_unmap_page(dev, dest, len, dir);
			}

			if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
				while (src_cnt--) {
					addr = mv_desc_get_src_addr(unmap,
								    src_cnt);
					if (addr == dest)
						continue;
					dma_unmap_page(dev, addr, len,
						       DMA_TO_DEVICE);
				}
			}
			desc->group_head = NULL;
		}
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static void
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan->device->common.dev, "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int current_cleaned = 0;
	struct mv_xor_desc *hw_desc;

	dev_dbg(mv_chan->device->common.dev, "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan->device->common.dev, "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {

		/* clean finished descriptors */
		hw_desc = iter->hw_desc;
		if (hw_desc->status & XOR_DESC_SUCCESS) {
			cookie = mv_xor_run_tx_complete_actions(iter, mv_chan,
								cookie);

			/* done processing desc, clean slot */
			mv_xor_clean_slot(iter, mv_chan);

			/* break if we cleaned the current descriptor */
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 1;
				break;
			}
		} else {
			if (iter->async_tx.phys == current_desc) {
				current_cleaned = 0;
				break;
			}
		}
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		if (current_cleaned) {
			/*
			 * current descriptor cleaned and removed, run
			 * from list head
			 */
			iter = list_entry(mv_chan->chain.next,
					  struct mv_xor_desc_slot,
					  chain_node);
			mv_xor_start_new_chain(mv_chan, iter);
		} else {
			if (!list_is_last(&iter->chain_node, &mv_chan->chain)) {
				/*
				 * descriptors are still waiting after
				 * current, trigger them
				 */
				iter = list_entry(iter->chain_node.next,
						  struct mv_xor_desc_slot,
						  chain_node);
				mv_xor_start_new_chain(mv_chan, iter);
			} else {
				/*
				 * some descriptors are still waiting
				 * to be cleaned
				 */
				tasklet_schedule(&mv_chan->irq_tasklet);
			}
		}
	}

	if (cookie > 0)
		mv_chan->completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
							  struct mv_xor_desc_slot,
							  slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

static dma_cookie_t
mv_desc_assign_cookie(struct mv_xor_chan *mv_chan,
		      struct mv_xor_desc_slot *desc)
{
	dma_cookie_t cookie = mv_chan->common.cookie;

	if (++cookie < 0)
		cookie = 1;
	mv_chan->common.cookie = desc->async_tx.cookie = cookie;
	return cookie;
}

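/*
 * dmaengine cookie convention, for reference: cookies grow monotonically,
 * and values <= 0 are reserved (negative for errors, 0 for "not yet
 * submitted"), which is why the increment above wraps from INT_MAX back
 * to 1 instead of going negative.
 */
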
/************************ DMA engine API functions ****************************/

static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = mv_desc_assign_cookie(mv_chan, sw_desc);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan->device->common.dev, "Append to last desc %x\n",
			old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current descriptor is the end of the chain
			 * before the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);
	return cookie;
}

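/*
 * A hedged usage sketch (not part of this driver): a dmaengine client
 * normally reaches the submit path above through the generic API rather
 * than by calling these functions directly. Channel acquisition and error
 * handling are elided; dest_dma/src_dma/len are assumed to be valid
 * DMA-mapped buffers.
 *
 *	struct dma_chan *chan;		// obtained via dma_request_channel()
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dest_dma, src_dma,
 *						   len, DMA_CTRL_ACK);
 *	cookie = tx->tx_submit(tx);		  // lands in mv_xor_tx_submit()
 *	chan->device->device_issue_pending(chan); // mv_xor_issue_pending()
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
 *	       == DMA_IN_PROGRESS)
 *		cpu_relax();
 */
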
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	char *hw_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	struct mv_xor_platform_data *plat_data =
		mv_chan->device->pdev->dev.platform_data;
	int num_descs_in_pool = plat_data->pool_size/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized"
				" %d descriptor slots", idx);
			break;
		}
		hw_desc = (char *) mv_chan->device->dma_desc_pool_virt;
		slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		hw_desc = (char *) mv_chan->device->dma_desc_pool;
		slot->async_tx.phys =
			(dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan->device->common.dev,
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

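/*
 * Worked example of the pool carving above, with assumed numbers: if the
 * platform hands us a pool_size of 4096 bytes and MV_XOR_SLOT_SIZE is 64,
 * the pool is carved into 4096 / 64 = 64 slots. Slot idx gets its hw_desc
 * at pool_virt + idx * 64 and its async_tx.phys at pool_phys + idx * 64,
 * so the CPU-visible and DMA-visible views of each descriptor stay in
 * lockstep.
 */
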
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x src %x len: %u flags: %ld\n",
		__func__, dest, src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan->device->common.dev,
		"%s dest: %x len: %u flags: %ld\n",
		__func__, dest, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memset_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMSET;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_block_fill_val(grp_start, value);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan->device->common.dev,
		"%s src_cnt: %d len: %u dest %x flags: %ld\n",
		__func__, src_cnt, len, dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan->device->common.dev,
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

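/*
 * Semantics of the descriptor built above: the engine reads src_cnt source
 * blocks and writes dest = src[0] ^ src[1] ^ ... ^ src[src_cnt - 1] for
 * each of the len bytes. Each source is enabled by its own bit in
 * desc_command (set in mv_desc_set_src_addr()), which is why the sources
 * are programmed with a countdown loop rather than a fixed number of
 * register writes.
 */
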
static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan->device->common.dev, "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan->device->common.dev,
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transactions state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;
	mv_chan->is_complete_cookie = cookie;
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	last_used = chan->cookie;
	last_complete = mv_chan->completed_cookie;

	dma_set_tx_state(txstate, last_complete, last_used, 0);
	return dma_async_is_complete(cookie, last_complete, last_used);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = __raw_readl(XOR_CONFIG(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "config      0x%08x.\n", val);

	val = __raw_readl(XOR_ACTIVATION(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "activation  0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr cause  0x%08x.\n", val);

	val = __raw_readl(XOR_INTR_MASK(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "intr mask   0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_CAUSE(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error cause 0x%08x.\n", val);

	val = __raw_readl(XOR_ERROR_ADDR(chan));
	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error addr  0x%08x.\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(chan->device->common.dev,
			"ignore this error\n");
		return;
	}

	dev_printk(KERN_ERR, chan->device->common.dev,
		   "error on chan %d. intr cause 0x%08x.\n",
		   chan->idx, intr_cause);

	mv_dump_xor_regs(chan);

	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(chan->device->common.dev, "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
#define MV_XOR_TEST_SIZE 2000

static int __devinit mv_xor_memcpy_self_test(struct mv_xor_device *device)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	dest_dma = dma_map_single(dma_chan->device->dev, dest,
				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);

	src_dma = dma_map_single(dma_chan->device->dev, src,
				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    MV_XOR_TEST_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
mv_xor_xor_self_test(struct mv_xor_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	struct mv_xor_chan *mv_chan;

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;
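
	/*
	 * Worked example of the expected pattern: with the default
	 * MV_XOR_NUM_SRC_TEST of 4, source page n is filled with (1 << n),
	 * so every destination byte must become
	 * 0x01 ^ 0x02 ^ 0x04 ^ 0x08 == 0x0f, and cmp_word == 0x0f0f0f0f.
	 */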
	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	/* test xor */
	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
				DMA_FROM_DEVICE);

	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					   0, PAGE_SIZE, DMA_TO_DEVICE);

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_SUCCESS) {
		dev_printk(KERN_ERR, dma_chan->device->dev,
			   "Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_chan = to_mv_xor_chan(dma_chan);
	dma_sync_single_for_cpu(&mv_chan->device->pdev->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_printk(KERN_ERR, dma_chan->device->dev,
				   "Self-test xor failed compare, disabling."
				   " index %d, data %x, expected %x\n", i,
				   ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = MV_XOR_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int __devexit mv_xor_remove(struct platform_device *dev)
{
	struct mv_xor_device *device = platform_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	struct mv_xor_chan *mv_chan;
	struct mv_xor_platform_data *plat_data = dev->dev.platform_data;

	dma_async_device_unregister(&device->common);

	dma_free_coherent(&dev->dev, plat_data->pool_size,
			  device->dma_desc_pool_virt, device->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &device->common.channels,
				 device_node) {
		mv_chan = to_mv_xor_chan(chan);
		list_del(&chan->device_node);
	}

	return 0;
}

static int __devinit mv_xor_probe(struct platform_device *pdev)
{
	int ret = 0;
	int irq;
	struct mv_xor_device *adev;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;
	struct mv_xor_platform_data *plat_data = pdev->dev.platform_data;

	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	dma_dev = &adev->common;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
							  plat_data->pool_size,
							  &adev->dma_desc_pool,
							  GFP_KERNEL);
	if (!adev->dma_desc_pool_virt)
		return -ENOMEM;

	adev->id = plat_data->hw_id;

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = plat_data->cap_mask;
	adev->pdev = pdev;
	platform_set_drvdata(pdev, adev);

	adev->shared = platform_get_drvdata(plat_data->shared);

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memset = mv_xor_prep_dma_memset;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	mv_chan->device = adev;
	mv_chan->idx = plat_data->hw_id;
	mv_chan->mmr_base = adev->shared->xor_base;

	if (!mv_chan->mmr_base) {
		ret = -ENOMEM;
		goto err_free_dma;
	}
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_free_dma;
	}
	ret = devm_request_irq(&pdev->dev, irq,
			       mv_xor_interrupt_handler,
			       0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->common.device = dma_dev;

	list_add_tail(&mv_chan->common.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(adev);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(adev);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_dma;
	}

	dev_printk(KERN_INFO, &pdev->dev, "Marvell XOR: "
		   "( %s%s%s%s)\n",
		   dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		   dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
		   dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		   dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	goto out;

err_free_dma:
	dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
			  adev->dma_desc_pool_virt, adev->dma_desc_pool);
out:
	return ret;
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
			 struct mbus_dram_target_info *dram)
{
	void __iomem *base = msp->xor_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
}

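/*
 * A worked example of the window encoding above, under assumed values: for
 * a single DRAM chip-select at base 0x00000000, size 256 MiB, mbus_attr
 * 0x0e and target id 0x0, window 0 would be programmed as
 *
 *	WINDOW_BASE(0) = (0x00000000 & 0xffff0000) | (0x0e << 8) | 0x0
 *	               = 0x00000e00
 *	WINDOW_SIZE(0) = (0x10000000 - 1) & 0xffff0000 = 0x0fff0000
 *
 * and win_enable collects bit 0 (window enable) plus 0x3 << 16
 * (read/write access for window 0).
 */
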
static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= __devexit_p(mv_xor_remove),
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_NAME,
	},
};

static int mv_xor_shared_probe(struct platform_device *pdev)
{
	struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
	struct mv_xor_shared_private *msp;
	struct resource *res;

	dev_printk(KERN_NOTICE, &pdev->dev, "Marvell shared XOR driver\n");

	msp = devm_kzalloc(&pdev->dev, sizeof(*msp), GFP_KERNEL);
	if (!msp)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	msp->xor_base = devm_ioremap(&pdev->dev, res->start,
				     resource_size(res));
	if (!msp->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	msp->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					  resource_size(res));
	if (!msp->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, msp);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (msd != NULL && msd->dram != NULL)
		mv_xor_conf_mbus_windows(msp, msd->dram);

	return 0;
}

static int mv_xor_shared_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver mv_xor_shared_driver = {
	.probe		= mv_xor_shared_probe,
	.remove		= mv_xor_shared_remove,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= MV_XOR_SHARED_NAME,
	},
};

static int __init mv_xor_init(void)
{
	int rc;

	rc = platform_driver_register(&mv_xor_shared_driver);
	if (!rc) {
		rc = platform_driver_register(&mv_xor_driver);
		if (rc)
			platform_driver_unregister(&mv_xor_shared_driver);
	}
	return rc;
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	platform_driver_unregister(&mv_xor_shared_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");