iop-adma: P+Q self test
[pandora-kernel.git] drivers/dma/iop-adma.c
/*
 * offload engine driver for the Intel XScale series of I/O processors
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */

/*
 * This driver supports the asynchronous DMA copy and RAID engines available
 * on the Intel XScale(R) family of I/O Processors (IOP 32x, 33x, 134x)
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/ioport.h>
#include <linux/raid/pq.h>

#include <mach/adma.h>

#define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
#define to_iop_adma_device(dev) \
        container_of(dev, struct iop_adma_device, common)
#define tx_to_iop_adma_slot(tx) \
        container_of(tx, struct iop_adma_desc_slot, async_tx)

/**
 * iop_adma_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &iop_chan->lock while calling this function
 */
static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
{
        int stride = slot->slots_per_op;

        while (stride--) {
                slot->slots_per_op = 0;
                slot = list_entry(slot->slot_node.next,
                                struct iop_adma_desc_slot,
                                slot_node);
        }
}
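
/*
 * Usage sketch (illustrative only): iop_adma_free_slots() walks the
 * per-channel slot list, so callers serialize against the allocator with
 * the channel lock, as __iop_adma_slot_cleanup() below does when it
 * reaches here via iop_adma_clean_slot():
 *
 *	spin_lock_bh(&iop_chan->lock);
 *	iop_adma_free_slots(slot);
 *	spin_unlock_bh(&iop_chan->lock);
 */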

static void
iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
{
        struct dma_async_tx_descriptor *tx = &desc->async_tx;
        struct iop_adma_desc_slot *unmap = desc->group_head;
        struct device *dev = &iop_chan->device->pdev->dev;
        u32 len = unmap->unmap_len;
        enum dma_ctrl_flags flags = tx->flags;
        u32 src_cnt;
        dma_addr_t addr;
        dma_addr_t dest;

        src_cnt = unmap->unmap_src_cnt;
        dest = iop_desc_get_dest_addr(unmap, iop_chan);
        if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                enum dma_data_direction dir;

                if (src_cnt > 1) /* is xor? */
                        dir = DMA_BIDIRECTIONAL;
                else
                        dir = DMA_FROM_DEVICE;

                dma_unmap_page(dev, dest, len, dir);
        }

        if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
                while (src_cnt--) {
                        addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt);
                        if (addr == dest)
                                continue;
                        dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
                }
        }
        desc->group_head = NULL;
}

static void
iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc)
{
        struct dma_async_tx_descriptor *tx = &desc->async_tx;
        struct iop_adma_desc_slot *unmap = desc->group_head;
        struct device *dev = &iop_chan->device->pdev->dev;
        u32 len = unmap->unmap_len;
        enum dma_ctrl_flags flags = tx->flags;
        u32 src_cnt = unmap->unmap_src_cnt;
        dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan);
        dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan);
        int i;

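        /* assumed intent: when continuing a previous P+Q operation the prep
         * routine appended the old P/Q values as extra sources (see
         * iop_adma_prep_dma_pq()); those are not client buffers, so they
         * are skipped here
         */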
        if (tx->flags & DMA_PREP_CONTINUE)
                src_cnt -= 3;

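        /* a validate (zero-sum) operation reads P and Q as sources; in that
         * case they were mapped DMA_TO_DEVICE and are unmapped with the
         * sources below instead
         */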
        if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) {
                dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL);
                dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL);
        }

        if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
                dma_addr_t addr;

                for (i = 0; i < src_cnt; i++) {
                        addr = iop_desc_get_src_addr(unmap, iop_chan, i);
                        dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
                }
                if (desc->pq_check_result) {
                        dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE);
                        dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE);
                }
        }

        desc->group_head = NULL;
}

static dma_cookie_t
iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
        struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
{
        struct dma_async_tx_descriptor *tx = &desc->async_tx;

        BUG_ON(tx->cookie < 0);
        if (tx->cookie > 0) {
                cookie = tx->cookie;
                tx->cookie = 0;

                /* call the callback (must not sleep or submit new
                 * operations to this channel)
                 */
                if (tx->callback)
                        tx->callback(tx->callback_param);

                /* unmap dma addresses
                 * (unmap_single vs unmap_page?)
                 */
                if (desc->group_head && desc->unmap_len) {
                        if (iop_desc_is_pq(desc))
                                iop_desc_unmap_pq(iop_chan, desc);
                        else
                                iop_desc_unmap(iop_chan, desc);
                }
        }

        /* run dependent operations */
        dma_run_dependencies(tx);

        return cookie;
}

static int
iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
        struct iop_adma_chan *iop_chan)
{
        /* the client is allowed to attach dependent operations
         * until 'ack' is set
         */
        if (!async_tx_test_ack(&desc->async_tx))
                return 0;

        /* leave the last descriptor in the chain
         * so we can append to it
         */
        if (desc->chain_node.next == &iop_chan->chain)
                return 1;

        dev_dbg(iop_chan->device->common.dev,
                "\tfree slot: %d slots_per_op: %d\n",
                desc->idx, desc->slots_per_op);

        list_del(&desc->chain_node);
        iop_adma_free_slots(desc);

        return 0;
}

static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
        struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
        dma_cookie_t cookie = 0;
        u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
        int busy = iop_chan_is_busy(iop_chan);
        int seen_current = 0, slot_cnt = 0, slots_per_op = 0;

        dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
        /* free completed slots from the chain starting with
         * the oldest descriptor
         */
        list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
                                        chain_node) {
                pr_debug("\tcookie: %d slot: %d busy: %d "
                        "this_desc: %#x next_desc: %#x ack: %d\n",
                        iter->async_tx.cookie, iter->idx, busy,
                        iter->async_tx.phys, iop_desc_get_next_desc(iter),
                        async_tx_test_ack(&iter->async_tx));
                prefetch(_iter);
                prefetch(&_iter->async_tx);

                /* do not advance past the current descriptor loaded into the
                 * hardware channel, subsequent descriptors are either in
                 * process or have not been submitted
                 */
                if (seen_current)
                        break;

                /* stop the search if we reach the current descriptor and the
                 * channel is busy, or if it appears that the current descriptor
                 * needs to be re-read (i.e. has been appended to)
                 */
                if (iter->async_tx.phys == current_desc) {
                        BUG_ON(seen_current++);
                        if (busy || iop_desc_get_next_desc(iter))
                                break;
                }

                /* detect the start of a group transaction */
                if (!slot_cnt && !slots_per_op) {
                        slot_cnt = iter->slot_cnt;
                        slots_per_op = iter->slots_per_op;
                        if (slot_cnt <= slots_per_op) {
                                slot_cnt = 0;
                                slots_per_op = 0;
                        }
                }

                if (slot_cnt) {
                        pr_debug("\tgroup++\n");
                        if (!grp_start)
                                grp_start = iter;
                        slot_cnt -= slots_per_op;
                }

                /* all the members of a group are complete */
                if (slots_per_op != 0 && slot_cnt == 0) {
                        struct iop_adma_desc_slot *grp_iter, *_grp_iter;
                        int end_of_chain = 0;
                        pr_debug("\tgroup end\n");

                        /* collect the total results */
                        if (grp_start->xor_check_result) {
                                u32 zero_sum_result = 0;
                                slot_cnt = grp_start->slot_cnt;
                                grp_iter = grp_start;

                                list_for_each_entry_from(grp_iter,
                                        &iop_chan->chain, chain_node) {
                                        zero_sum_result |=
                                            iop_desc_get_zero_result(grp_iter);
                                        pr_debug("\titer%d result: %d\n",
                                                grp_iter->idx, zero_sum_result);
                                        slot_cnt -= slots_per_op;
                                        if (slot_cnt == 0)
                                                break;
                                }
                                pr_debug("\tgrp_start->xor_check_result: %p\n",
                                        grp_start->xor_check_result);
                                *grp_start->xor_check_result = zero_sum_result;
                        }

                        /* clean up the group */
                        slot_cnt = grp_start->slot_cnt;
                        grp_iter = grp_start;
                        list_for_each_entry_safe_from(grp_iter, _grp_iter,
                                &iop_chan->chain, chain_node) {
                                cookie = iop_adma_run_tx_complete_actions(
                                        grp_iter, iop_chan, cookie);

                                slot_cnt -= slots_per_op;
                                end_of_chain = iop_adma_clean_slot(grp_iter,
                                        iop_chan);

                                if (slot_cnt == 0 || end_of_chain)
                                        break;
                        }

                        /* the group should be complete at this point */
                        BUG_ON(slot_cnt);

                        slots_per_op = 0;
                        grp_start = NULL;
                        if (end_of_chain)
                                break;
                        else
                                continue;
                } else if (slots_per_op) /* wait for group completion */
                        continue;

                /* write back zero sum results (single descriptor case) */
                if (iter->xor_check_result && iter->async_tx.cookie)
                        *iter->xor_check_result =
                                iop_desc_get_zero_result(iter);

                cookie = iop_adma_run_tx_complete_actions(
                                        iter, iop_chan, cookie);

                if (iop_adma_clean_slot(iter, iop_chan))
                        break;
        }

        if (cookie > 0) {
                iop_chan->completed_cookie = cookie;
                pr_debug("\tcompleted cookie %d\n", cookie);
        }
}
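
/*
 * Group accounting sketch (illustrative): an operation that needed
 * slot_cnt = 4 slots with slots_per_op = 2 spans two descriptors.  The
 * walk above decrements slot_cnt by slots_per_op at each descriptor
 * (4 -> 2 -> 0), so the group is only treated as complete, and its
 * zero-sum results collected, once every member has been visited.
 */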

static void
iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
{
        spin_lock_bh(&iop_chan->lock);
        __iop_adma_slot_cleanup(iop_chan);
        spin_unlock_bh(&iop_chan->lock);
}

static void iop_adma_tasklet(unsigned long data)
{
        struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data;

        /* lockdep will flag dependency submissions as potentially
         * recursive locking, this is not the case as a dependency
         * submission will never recurse into a channel's submit routine.
         * There are checks in async_tx.c to prevent this.
         */
        spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING);
        __iop_adma_slot_cleanup(iop_chan);
        spin_unlock(&iop_chan->lock);
}

static struct iop_adma_desc_slot *
iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
                        int slots_per_op)
{
        struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
        LIST_HEAD(chain);
        int slots_found, retry = 0;

        /* start search from the last allocated descriptor
         * if a contiguous allocation cannot be found start searching
         * from the beginning of the list
         */
retry:
        slots_found = 0;
        if (retry == 0)
                iter = iop_chan->last_used;
        else
                iter = list_entry(&iop_chan->all_slots,
                        struct iop_adma_desc_slot,
                        slot_node);

        list_for_each_entry_safe_continue(
                iter, _iter, &iop_chan->all_slots, slot_node) {
                prefetch(_iter);
                prefetch(&_iter->async_tx);
                if (iter->slots_per_op) {
                        /* give up after finding the first busy slot
                         * on the second pass through the list
                         */
                        if (retry)
                                break;

                        slots_found = 0;
                        continue;
                }

                /* start the allocation if the slot is correctly aligned */
                if (!slots_found++) {
                        if (iop_desc_is_aligned(iter, slots_per_op))
                                alloc_start = iter;
                        else {
                                slots_found = 0;
                                continue;
                        }
                }

                if (slots_found == num_slots) {
                        struct iop_adma_desc_slot *alloc_tail = NULL;
                        struct iop_adma_desc_slot *last_used = NULL;
                        iter = alloc_start;
                        while (num_slots) {
                                int i;
                                dev_dbg(iop_chan->device->common.dev,
                                        "allocated slot: %d "
                                        "(desc %p phys: %#x) slots_per_op %d\n",
                                        iter->idx, iter->hw_desc,
                                        iter->async_tx.phys, slots_per_op);

                                /* pre-ack all but the last descriptor */
                                if (num_slots != slots_per_op)
                                        async_tx_ack(&iter->async_tx);

                                list_add_tail(&iter->chain_node, &chain);
                                alloc_tail = iter;
                                iter->async_tx.cookie = 0;
                                iter->slot_cnt = num_slots;
                                iter->xor_check_result = NULL;
                                for (i = 0; i < slots_per_op; i++) {
                                        iter->slots_per_op = slots_per_op - i;
                                        last_used = iter;
                                        iter = list_entry(iter->slot_node.next,
                                                struct iop_adma_desc_slot,
                                                slot_node);
                                }
                                num_slots -= slots_per_op;
                        }
                        alloc_tail->group_head = alloc_start;
                        alloc_tail->async_tx.cookie = -EBUSY;
                        list_splice(&chain, &alloc_tail->async_tx.tx_list);
                        iop_chan->last_used = last_used;
                        iop_desc_clear_next_desc(alloc_start);
                        iop_desc_clear_next_desc(alloc_tail);
                        return alloc_tail;
                }
        }
        if (!retry++)
                goto retry;

        /* perform direct reclaim if the allocation fails */
        __iop_adma_slot_cleanup(iop_chan);

        return NULL;
}
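
/*
 * Usage sketch (illustrative only): the prep routines below all follow
 * the same pattern around iop_adma_alloc_slots():
 *
 *	spin_lock_bh(&iop_chan->lock);
 *	slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
 *	sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
 *	if (sw_desc) {
 *		// program sw_desc->group_head, set unmap_len, flags, ...
 *	}
 *	spin_unlock_bh(&iop_chan->lock);
 */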

static dma_cookie_t
iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
        struct iop_adma_desc_slot *desc)
{
        dma_cookie_t cookie = iop_chan->common.cookie;
        cookie++;
        if (cookie < 0)
                cookie = 1;
        iop_chan->common.cookie = desc->async_tx.cookie = cookie;
        return cookie;
}
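
/*
 * Cookie sketch (illustrative): dma_cookie_t is a signed 32-bit value, so
 * the sequence runs 1, 2, ..., INT_MAX; the increment then wraps negative
 * and the check above restarts the sequence at 1, keeping valid cookies
 * strictly positive (negative values are reserved for error codes such as
 * the -EBUSY placeholder set in iop_adma_alloc_slots()).
 */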

static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
{
        dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
                iop_chan->pending);

        if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
                iop_chan->pending = 0;
                iop_chan_append(iop_chan);
        }
}
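
/*
 * Threshold sketch (illustrative; IOP_ADMA_THRESHOLD comes from
 * <mach/adma.h> and its value is platform specific): if the threshold
 * were 4, three 1-slot memcpy submits would only accumulate 'pending',
 * and the fourth would trigger iop_chan_append() to kick the hardware.
 * iop_adma_issue_pending() below flushes regardless of the count.
 */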

static dma_cookie_t
iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
        struct iop_adma_desc_slot *grp_start, *old_chain_tail;
        int slot_cnt;
        int slots_per_op;
        dma_cookie_t cookie;
        dma_addr_t next_dma;

        grp_start = sw_desc->group_head;
        slot_cnt = grp_start->slot_cnt;
        slots_per_op = grp_start->slots_per_op;

        spin_lock_bh(&iop_chan->lock);
        cookie = iop_desc_assign_cookie(iop_chan, sw_desc);

        old_chain_tail = list_entry(iop_chan->chain.prev,
                struct iop_adma_desc_slot, chain_node);
        list_splice_init(&sw_desc->async_tx.tx_list,
                         &old_chain_tail->chain_node);

        /* fix up the hardware chain */
        next_dma = grp_start->async_tx.phys;
        iop_desc_set_next_desc(old_chain_tail, next_dma);
        BUG_ON(iop_desc_get_next_desc(old_chain_tail) != next_dma); /* flush */

        /* check for pre-chained descriptors */
        iop_paranoia(iop_desc_get_next_desc(sw_desc));

        /* increment the pending count by the number of slots
         * memcpy operations have a 1:1 (slot:operation) relation
         * other operations are heavier and will pop the threshold
         * more often.
         */
        iop_chan->pending += slot_cnt;
        iop_adma_check_threshold(iop_chan);
        spin_unlock_bh(&iop_chan->lock);

        dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
                __func__, sw_desc->async_tx.cookie, sw_desc->idx);

        return cookie;
}

static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);

/**
 * iop_adma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: allocate descriptor resources for this channel
 * @client: current client requesting the channel be ready for requests
 *
 * Note: We keep the slots for 1 operation on iop_chan->chain at all times.  To
 * avoid deadlock, via async_xor, num_descs_in_pool must at a minimum be
 * greater than 2x the number of slots needed to satisfy a device->max_xor
 * request.
 */
static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
{
        char *hw_desc;
        int idx;
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *slot = NULL;
        int init = iop_chan->slots_allocated ? 0 : 1;
        struct iop_adma_platform_data *plat_data =
                iop_chan->device->pdev->dev.platform_data;
        int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;

        /* Allocate descriptor slots */
        do {
                idx = iop_chan->slots_allocated;
                if (idx == num_descs_in_pool)
                        break;

                slot = kzalloc(sizeof(*slot), GFP_KERNEL);
                if (!slot) {
                        printk(KERN_INFO "IOP ADMA Channel only initialized"
                                " %d descriptor slots\n", idx);
                        break;
                }
                hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
                slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];

                dma_async_tx_descriptor_init(&slot->async_tx, chan);
                slot->async_tx.tx_submit = iop_adma_tx_submit;
                INIT_LIST_HEAD(&slot->chain_node);
                INIT_LIST_HEAD(&slot->slot_node);
                hw_desc = (char *) iop_chan->device->dma_desc_pool;
                slot->async_tx.phys =
                        (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
                slot->idx = idx;

                spin_lock_bh(&iop_chan->lock);
                iop_chan->slots_allocated++;
                list_add_tail(&slot->slot_node, &iop_chan->all_slots);
                spin_unlock_bh(&iop_chan->lock);
        } while (iop_chan->slots_allocated < num_descs_in_pool);

        if (idx && !iop_chan->last_used)
                iop_chan->last_used = list_entry(iop_chan->all_slots.next,
                                        struct iop_adma_desc_slot,
                                        slot_node);

        dev_dbg(iop_chan->device->common.dev,
                "allocated %d descriptor slots last_used: %p\n",
                iop_chan->slots_allocated, iop_chan->last_used);

        /* initialize the channel and the chain with a null operation */
        if (init) {
                if (dma_has_cap(DMA_MEMCPY,
                        iop_chan->device->common.cap_mask))
                        iop_chan_start_null_memcpy(iop_chan);
                else if (dma_has_cap(DMA_XOR,
                        iop_chan->device->common.cap_mask))
                        iop_chan_start_null_xor(iop_chan);
                else
                        BUG();
        }

        return (idx > 0) ? idx : -ENOMEM;
}
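
/*
 * Pool sizing sketch (illustrative numbers, not taken from any platform):
 * with a hypothetical plat_data->pool_size of 8192 bytes and an
 * IOP_ADMA_SLOT_SIZE of 32, num_descs_in_pool above would be
 * 8192 / 32 = 256 slots.  Per the note on iop_adma_alloc_chan_resources(),
 * the pool must hold more than 2x the slots needed for a device->max_xor
 * operation to avoid deadlock via async_xor.
 */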

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *sw_desc, *grp_start;
        int slot_cnt, slots_per_op;

        dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);

        spin_lock_bh(&iop_chan->lock);
        slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
        if (sw_desc) {
                grp_start = sw_desc->group_head;
                iop_desc_init_interrupt(grp_start, iop_chan);
                grp_start->unmap_len = 0;
                sw_desc->async_tx.flags = flags;
        }
        spin_unlock_bh(&iop_chan->lock);

        return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
                         dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *sw_desc, *grp_start;
        int slot_cnt, slots_per_op;

        if (unlikely(!len))
                return NULL;
        BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

        dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
                __func__, len);

        spin_lock_bh(&iop_chan->lock);
        slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
        if (sw_desc) {
                grp_start = sw_desc->group_head;
                iop_desc_init_memcpy(grp_start, flags);
                iop_desc_set_byte_count(grp_start, iop_chan, len);
                iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
                iop_desc_set_memcpy_src_addr(grp_start, dma_src);
                sw_desc->unmap_src_cnt = 1;
                sw_desc->unmap_len = len;
                sw_desc->async_tx.flags = flags;
        }
        spin_unlock_bh(&iop_chan->lock);

        return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
                         int value, size_t len, unsigned long flags)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *sw_desc, *grp_start;
        int slot_cnt, slots_per_op;

        if (unlikely(!len))
                return NULL;
        BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));

        dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
                __func__, len);

        spin_lock_bh(&iop_chan->lock);
        slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
        if (sw_desc) {
                grp_start = sw_desc->group_head;
                iop_desc_init_memset(grp_start, flags);
                iop_desc_set_byte_count(grp_start, iop_chan, len);
                iop_desc_set_block_fill_val(grp_start, value);
                iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
                sw_desc->unmap_src_cnt = 1;
                sw_desc->unmap_len = len;
                sw_desc->async_tx.flags = flags;
        }
        spin_unlock_bh(&iop_chan->lock);

        return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
                      dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
                      unsigned long flags)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *sw_desc, *grp_start;
        int slot_cnt, slots_per_op;

        if (unlikely(!len))
                return NULL;
        BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));

        dev_dbg(iop_chan->device->common.dev,
                "%s src_cnt: %d len: %u flags: %lx\n",
                __func__, src_cnt, len, flags);

        spin_lock_bh(&iop_chan->lock);
        slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
        if (sw_desc) {
                grp_start = sw_desc->group_head;
                iop_desc_init_xor(grp_start, src_cnt, flags);
                iop_desc_set_byte_count(grp_start, iop_chan, len);
                iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
                sw_desc->unmap_src_cnt = src_cnt;
                sw_desc->unmap_len = len;
                sw_desc->async_tx.flags = flags;
                while (src_cnt--)
                        iop_desc_set_xor_src_addr(grp_start, src_cnt,
                                                  dma_src[src_cnt]);
        }
        spin_unlock_bh(&iop_chan->lock);

        return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
                          unsigned int src_cnt, size_t len, u32 *result,
                          unsigned long flags)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *sw_desc, *grp_start;
        int slot_cnt, slots_per_op;

        if (unlikely(!len))
                return NULL;

        dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
                __func__, src_cnt, len);

        spin_lock_bh(&iop_chan->lock);
        slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
        if (sw_desc) {
                grp_start = sw_desc->group_head;
                iop_desc_init_zero_sum(grp_start, src_cnt, flags);
                iop_desc_set_zero_sum_byte_count(grp_start, len);
                grp_start->xor_check_result = result;
                pr_debug("\t%s: grp_start->xor_check_result: %p\n",
                        __func__, grp_start->xor_check_result);
                sw_desc->unmap_src_cnt = src_cnt;
                sw_desc->unmap_len = len;
                sw_desc->async_tx.flags = flags;
                while (src_cnt--)
                        iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
                                                       dma_src[src_cnt]);
        }
        spin_unlock_bh(&iop_chan->lock);

        return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
                     unsigned int src_cnt, const unsigned char *scf, size_t len,
                     unsigned long flags)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *sw_desc, *g;
        int slot_cnt, slots_per_op;
        int continue_srcs;

        if (unlikely(!len))
                return NULL;
        BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);

        dev_dbg(iop_chan->device->common.dev,
                "%s src_cnt: %d len: %u flags: %lx\n",
                __func__, src_cnt, len, flags);

        if (dmaf_p_disabled_continue(flags))
                continue_srcs = 1+src_cnt;
        else if (dmaf_continue(flags))
                continue_srcs = 3+src_cnt;
        else
                continue_srcs = 0+src_cnt;

        spin_lock_bh(&iop_chan->lock);
        slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op);
        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
        if (sw_desc) {
                int i;

                g = sw_desc->group_head;
                iop_desc_set_byte_count(g, iop_chan, len);

                /* even if P is disabled its destination address (bits
                 * [3:0]) must match Q.  It is ok if P points to an
                 * invalid address, it won't be written.
                 */
                if (flags & DMA_PREP_PQ_DISABLE_P)
                        dst[0] = dst[1] & 0x7;

                iop_desc_set_pq_addr(g, dst);
                sw_desc->unmap_src_cnt = src_cnt;
                sw_desc->unmap_len = len;
                sw_desc->async_tx.flags = flags;
                for (i = 0; i < src_cnt; i++)
                        iop_desc_set_pq_src_addr(g, i, src[i], scf[i]);

                /* if we are continuing a previous operation factor in
                 * the old p and q values, see the comment for dma_maxpq
                 * in include/linux/dmaengine.h
                 */
                if (dmaf_p_disabled_continue(flags))
                        iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
                else if (dmaf_continue(flags)) {
                        iop_desc_set_pq_src_addr(g, i++, dst[0], 0);
                        iop_desc_set_pq_src_addr(g, i++, dst[1], 1);
                        iop_desc_set_pq_src_addr(g, i++, dst[1], 0);
                }
                iop_desc_init_pq(g, i, flags);
        }
        spin_unlock_bh(&iop_chan->lock);

        return sw_desc ? &sw_desc->async_tx : NULL;
}
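
/*
 * Continuation sketch (illustrative): for an 8-source P+Q continuation
 * (DMA_PREP_CONTINUE set, P enabled), iop_adma_prep_dma_pq() programs
 * 8 + 3 = 11 sources: the 8 data blocks plus old P (coefficient 0), old Q
 * (coefficient 1), and old Q again (coefficient 0), folding the previous
 * results back into the new syndrome.  With P disabled, only old Q is
 * appended, for 8 + 1 sources.
 */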

static struct dma_async_tx_descriptor *
iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
                         unsigned int src_cnt, const unsigned char *scf,
                         size_t len, enum sum_check_flags *pqres,
                         unsigned long flags)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *sw_desc, *g;
        int slot_cnt, slots_per_op;

        if (unlikely(!len))
                return NULL;
        BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);

        dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
                __func__, src_cnt, len);

        spin_lock_bh(&iop_chan->lock);
        slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op);
        sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
        if (sw_desc) {
                /* for validate operations p and q are tagged onto the
                 * end of the source list
                 */
                int pq_idx = src_cnt;

                g = sw_desc->group_head;
                iop_desc_init_pq_zero_sum(g, src_cnt+2, flags);
                iop_desc_set_pq_zero_sum_byte_count(g, len);
                g->pq_check_result = pqres;
                pr_debug("\t%s: g->pq_check_result: %p\n",
                        __func__, g->pq_check_result);
                sw_desc->unmap_src_cnt = src_cnt+2;
                sw_desc->unmap_len = len;
                sw_desc->async_tx.flags = flags;
                while (src_cnt--)
                        iop_desc_set_pq_zero_sum_src_addr(g, src_cnt,
                                                          src[src_cnt],
                                                          scf[src_cnt]);
                iop_desc_set_pq_zero_sum_addr(g, pq_idx, src);
        }
        spin_unlock_bh(&iop_chan->lock);

        return sw_desc ? &sw_desc->async_tx : NULL;
}

static void iop_adma_free_chan_resources(struct dma_chan *chan)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        struct iop_adma_desc_slot *iter, *_iter;
        int in_use_descs = 0;

        iop_adma_slot_cleanup(iop_chan);

        spin_lock_bh(&iop_chan->lock);
        list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
                                        chain_node) {
                in_use_descs++;
                list_del(&iter->chain_node);
        }
        list_for_each_entry_safe_reverse(
                iter, _iter, &iop_chan->all_slots, slot_node) {
                list_del(&iter->slot_node);
                kfree(iter);
                iop_chan->slots_allocated--;
        }
        iop_chan->last_used = NULL;

        dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
                __func__, iop_chan->slots_allocated);
        spin_unlock_bh(&iop_chan->lock);

        /* one is ok since we left it there on purpose */
        if (in_use_descs > 1)
                printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
                        in_use_descs - 1);
}

/**
 * iop_adma_is_complete - poll the status of an ADMA transaction
 * @chan: ADMA channel handle
 * @cookie: ADMA transaction identifier
 * @done: if not %NULL, updated with the last completed cookie
 * @used: if not %NULL, updated with the last used cookie
 */
static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
                                        dma_cookie_t cookie,
                                        dma_cookie_t *done,
                                        dma_cookie_t *used)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        enum dma_status ret;

        last_used = chan->cookie;
        last_complete = iop_chan->completed_cookie;

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret == DMA_SUCCESS)
                return ret;

        iop_adma_slot_cleanup(iop_chan);

        last_used = chan->cookie;
        last_complete = iop_chan->completed_cookie;

        if (done)
                *done = last_complete;
        if (used)
                *used = last_used;

        return dma_async_is_complete(cookie, last_complete, last_used);
}
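
/*
 * Polling sketch (illustrative; this is the pattern the self-tests below
 * follow):
 *
 *	cookie = iop_adma_tx_submit(tx);
 *	iop_adma_issue_pending(dma_chan);
 *	msleep(1);
 *	if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS)
 *		return -ENODEV;	// treat as a timeout
 */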

static irqreturn_t iop_adma_eot_handler(int irq, void *data)
{
        struct iop_adma_chan *chan = data;

        dev_dbg(chan->device->common.dev, "%s\n", __func__);

        tasklet_schedule(&chan->irq_tasklet);

        iop_adma_device_clear_eot_status(chan);

        return IRQ_HANDLED;
}

static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
{
        struct iop_adma_chan *chan = data;

        dev_dbg(chan->device->common.dev, "%s\n", __func__);

        tasklet_schedule(&chan->irq_tasklet);

        iop_adma_device_clear_eoc_status(chan);

        return IRQ_HANDLED;
}

static irqreturn_t iop_adma_err_handler(int irq, void *data)
{
        struct iop_adma_chan *chan = data;
        unsigned long status = iop_chan_get_status(chan);

        dev_printk(KERN_ERR, chan->device->common.dev,
                "error ( %s%s%s%s%s%s%s)\n",
                iop_is_err_int_parity(status, chan) ? "int_parity " : "",
                iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
                iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
                iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
                iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
                iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
                iop_is_err_split_tx(status, chan) ? "split_tx " : "");

        iop_adma_device_clear_err_status(chan);

        BUG();

        return IRQ_HANDLED;
}

static void iop_adma_issue_pending(struct dma_chan *chan)
{
        struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);

        if (iop_chan->pending) {
                iop_chan->pending = 0;
                iop_chan_append(iop_chan);
        }
}

/*
 * Perform a transaction to verify the HW works.
 */
#define IOP_ADMA_TEST_SIZE 2000

static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
{
        int i;
        void *src, *dest;
        dma_addr_t src_dma, dest_dma;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        struct dma_async_tx_descriptor *tx;
        int err = 0;
        struct iop_adma_chan *iop_chan;

        dev_dbg(device->common.dev, "%s\n", __func__);

        src = kmalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
        if (!src)
                return -ENOMEM;
        dest = kzalloc(IOP_ADMA_TEST_SIZE, GFP_KERNEL);
        if (!dest) {
                kfree(src);
                return -ENOMEM;
        }

        /* Fill in src buffer */
        for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
                ((u8 *) src)[i] = (u8)i;

        /* Start copy, using first DMA channel */
        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
        if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        dest_dma = dma_map_single(dma_chan->device->dev, dest,
                                IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
        src_dma = dma_map_single(dma_chan->device->dev, src,
                                IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
        tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
                                      IOP_ADMA_TEST_SIZE,
                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

        cookie = iop_adma_tx_submit(tx);
        iop_adma_issue_pending(dma_chan);
        msleep(1);

        if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
                        DMA_SUCCESS) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test copy timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        iop_chan = to_iop_adma_chan(dma_chan);
        dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
                IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
        if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test copy failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

free_resources:
        iop_adma_free_chan_resources(dma_chan);
out:
        kfree(src);
        kfree(dest);
        return err;
}

#define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
static int __devinit
iop_adma_xor_val_self_test(struct iop_adma_device *device)
{
        int i, src_idx;
        struct page *dest;
        struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
        struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
        dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
        dma_addr_t dma_addr, dest_dma;
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        u8 cmp_byte = 0;
        u32 cmp_word;
        u32 zero_sum_result;
        int err = 0;
        struct iop_adma_chan *iop_chan;

        dev_dbg(device->common.dev, "%s\n", __func__);

        for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
                xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
                if (!xor_srcs[src_idx]) {
                        while (src_idx--)
                                __free_page(xor_srcs[src_idx]);
                        return -ENOMEM;
                }
        }

        dest = alloc_page(GFP_KERNEL);
        if (!dest) {
                while (src_idx--)
                        __free_page(xor_srcs[src_idx]);
                return -ENOMEM;
        }

        /* Fill in src buffers */
        for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
                u8 *ptr = page_address(xor_srcs[src_idx]);
                for (i = 0; i < PAGE_SIZE; i++)
                        ptr[i] = (1 << src_idx);
        }

        for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
                cmp_byte ^= (u8) (1 << src_idx);

        cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
                        (cmp_byte << 8) | cmp_byte;

        memset(page_address(dest), 0, PAGE_SIZE);

        dma_chan = container_of(device->common.channels.next,
                                struct dma_chan,
                                device_node);
        if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
                err = -ENODEV;
                goto out;
        }

        /* test xor */
        dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
                                PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
                dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
                                           0, PAGE_SIZE, DMA_TO_DEVICE);
        tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
                                   IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE,
                                   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

        cookie = iop_adma_tx_submit(tx);
        iop_adma_issue_pending(dma_chan);
        msleep(8);

        if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
                DMA_SUCCESS) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test xor timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        iop_chan = to_iop_adma_chan(dma_chan);
        dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
                PAGE_SIZE, DMA_FROM_DEVICE);
        for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
                u32 *ptr = page_address(dest);
                if (ptr[i] != cmp_word) {
                        dev_printk(KERN_ERR, dma_chan->device->dev,
                                "Self-test xor failed compare, disabling\n");
                        err = -ENODEV;
                        goto free_resources;
                }
        }
        dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
                PAGE_SIZE, DMA_TO_DEVICE);

        /* skip zero sum if the capability is not present */
        if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
                goto free_resources;

        /* zero sum the sources with the destination page */
        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
                zero_sum_srcs[i] = xor_srcs[i];
        zero_sum_srcs[i] = dest;

        zero_sum_result = 1;

        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
                dma_srcs[i] = dma_map_page(dma_chan->device->dev,
                                           zero_sum_srcs[i], 0, PAGE_SIZE,
                                           DMA_TO_DEVICE);
        tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
                                       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
                                       &zero_sum_result,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

        cookie = iop_adma_tx_submit(tx);
        iop_adma_issue_pending(dma_chan);
        msleep(8);

        if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test zero sum timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        if (zero_sum_result != 0) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test zero sum failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        /* test memset */
        dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
                        PAGE_SIZE, DMA_FROM_DEVICE);
        tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

        cookie = iop_adma_tx_submit(tx);
        iop_adma_issue_pending(dma_chan);
        msleep(8);

        if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test memset timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
                u32 *ptr = page_address(dest);
                if (ptr[i]) {
                        dev_printk(KERN_ERR, dma_chan->device->dev,
                                "Self-test memset failed compare, disabling\n");
                        err = -ENODEV;
                        goto free_resources;
                }
        }

        /* test for non-zero parity sum */
        zero_sum_result = 0;
        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
                dma_srcs[i] = dma_map_page(dma_chan->device->dev,
                                           zero_sum_srcs[i], 0, PAGE_SIZE,
                                           DMA_TO_DEVICE);
        tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
                                       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
                                       &zero_sum_result,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

        cookie = iop_adma_tx_submit(tx);
        iop_adma_issue_pending(dma_chan);
        msleep(8);

        if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test non-zero sum timed out, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

        if (zero_sum_result != 1) {
                dev_printk(KERN_ERR, dma_chan->device->dev,
                        "Self-test non-zero sum failed compare, disabling\n");
                err = -ENODEV;
                goto free_resources;
        }

free_resources:
        iop_adma_free_chan_resources(dma_chan);
out:
        src_idx = IOP_ADMA_NUM_SRC_TEST;
        while (src_idx--)
                __free_page(xor_srcs[src_idx]);
        __free_page(dest);
        return err;
}

#ifdef CONFIG_MD_RAID6_PQ
static int __devinit
iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
{
        /* combined sources, software pq results, and extra hw pq results */
        struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
        /* ptr to the extra hw pq buffers defined above */
        struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
        /* address conversion buffers (dma_map / page_address) */
        void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
        dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST];
        dma_addr_t pq_dest[2];
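        /* index map (for clarity): pq[0..IOP_ADMA_NUM_SRC_TEST-1] are the
         * test sources, pq[IOP_ADMA_NUM_SRC_TEST] and [+1] hold the
         * software-generated P/Q, and pq[IOP_ADMA_NUM_SRC_TEST+2] and [+3]
         * (aliased by pq_hw) receive the hardware-generated P/Q
         */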

        int i;
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *dma_chan;
        dma_cookie_t cookie;
        u32 zero_sum_result;
        int err = 0;
        struct device *dev;

        dev_dbg(device->common.dev, "%s\n", __func__);

        for (i = 0; i < ARRAY_SIZE(pq); i++) {
                pq[i] = alloc_page(GFP_KERNEL);
                if (!pq[i]) {
                        while (i--)
                                __free_page(pq[i]);
                        return -ENOMEM;
                }
        }

        /* Fill in src buffers */
        for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) {
                pq_sw[i] = page_address(pq[i]);
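                /* memset() only uses the low byte of the value, i.e.
                 * 0x11 << i per source page here
                 */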
1306                 memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE);
1307         }
1308         pq_sw[i] = page_address(pq[i]);
1309         pq_sw[i+1] = page_address(pq[i+1]);
1310
1311         dma_chan = container_of(device->common.channels.next,
1312                                 struct dma_chan,
1313                                 device_node);
1314         if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
1315                 err = -ENODEV;
1316                 goto out;
1317         }
1318
1319         dev = dma_chan->device->dev;
1320
1321         /* initialize the dests */
1322         memset(page_address(pq_hw[0]), 0 , PAGE_SIZE);
1323         memset(page_address(pq_hw[1]), 0 , PAGE_SIZE);
1324
1325         /* test pq */
1326         pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1327         pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE);
1328         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
1329                 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1330                                          DMA_TO_DEVICE);
1331
1332         tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src,
1333                                   IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp,
1334                                   PAGE_SIZE,
1335                                   DMA_PREP_INTERRUPT |
1336                                   DMA_CTRL_ACK);
1337
1338         cookie = iop_adma_tx_submit(tx);
1339         iop_adma_issue_pending(dma_chan);
1340         msleep(8);
1341
1342         if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
1343                 DMA_SUCCESS) {
1344                 dev_err(dev, "Self-test pq timed out, disabling\n");
1345                 err = -ENODEV;
1346                 goto free_resources;
1347         }
1348
1349         raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw);
1350
1351         if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST],
1352                    page_address(pq_hw[0]), PAGE_SIZE) != 0) {
1353                 dev_err(dev, "Self-test p failed compare, disabling\n");
1354                 err = -ENODEV;
1355                 goto free_resources;
1356         }
1357         if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1],
1358                    page_address(pq_hw[1]), PAGE_SIZE) != 0) {
1359                 dev_err(dev, "Self-test q failed compare, disabling\n");
1360                 err = -ENODEV;
1361                 goto free_resources;
1362         }
1363
1364         /* test correct zero sum using the software generated pq values */
1365         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1366                 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1367                                          DMA_TO_DEVICE);
1368
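        /* seed the result with all ones; validation over consistent
         * data passes only if the engine clears both mismatch flags
         */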
1369         zero_sum_result = ~0;
1370         tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1371                                       pq_src, IOP_ADMA_NUM_SRC_TEST,
1372                                       raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1373                                       DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1374
1375         cookie = iop_adma_tx_submit(tx);
1376         iop_adma_issue_pending(dma_chan);
1377         msleep(8);
1378
1379         if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
1380                 DMA_SUCCESS) {
1381                 dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n");
1382                 err = -ENODEV;
1383                 goto free_resources;
1384         }
1385
1386         if (zero_sum_result != 0) {
1387                 dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n",
1388                         zero_sum_result);
1389                 err = -ENODEV;
1390                 goto free_resources;
1391         }
1392
1393         /* test incorrect zero sum */
1394         i = IOP_ADMA_NUM_SRC_TEST;
1395         memset(pq_sw[i] + 100, 0, 100);
1396         memset(pq_sw[i+1] + 200, 0, 200);
1397         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++)
1398                 pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
1399                                          DMA_TO_DEVICE);
1400
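        /* seed the result with zero; the corrupted pages must now set
         * both the P and Q mismatch flags
         */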
1401         zero_sum_result = 0;
1402         tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST],
1403                                       pq_src, IOP_ADMA_NUM_SRC_TEST,
1404                                       raid6_gfexp, PAGE_SIZE, &zero_sum_result,
1405                                       DMA_PREP_INTERRUPT|DMA_CTRL_ACK);
1406
1407         cookie = iop_adma_tx_submit(tx);
1408         iop_adma_issue_pending(dma_chan);
1409         msleep(8);
1410
1411         if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
1412                 DMA_SUCCESS) {
1413                 dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n");
1414                 err = -ENODEV;
1415                 goto free_resources;
1416         }
1417
1418         if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) {
1419                 dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n",
1420                         zero_sum_result);
1421                 err = -ENODEV;
1422                 goto free_resources;
1423         }
1424
1425 free_resources:
1426         iop_adma_free_chan_resources(dma_chan);
1427 out:
1428         i = ARRAY_SIZE(pq);
1429         while (i--)
1430                 __free_page(pq[i]);
1431         return err;
1432 }
1433 #endif
1434
1435 static int __devexit iop_adma_remove(struct platform_device *dev)
1436 {
1437         struct iop_adma_device *device = platform_get_drvdata(dev);
1438         struct dma_chan *chan, *_chan;
1439         struct iop_adma_chan *iop_chan;
1440         struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
1441
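        /* unregister from dmaengine first so no new requests arrive,
         * then release the descriptor pool and per-channel state
         */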
1442         dma_async_device_unregister(&device->common);
1443
1444         dma_free_coherent(&dev->dev, plat_data->pool_size,
1445                         device->dma_desc_pool_virt, device->dma_desc_pool);
1446
1447         list_for_each_entry_safe(chan, _chan, &device->common.channels,
1448                                 device_node) {
1449                 iop_chan = to_iop_adma_chan(chan);
1450                 list_del(&chan->device_node);
1451                 kfree(iop_chan);
1452         }
1453         kfree(device);
1454
1455         return 0;
1456 }
1457
1458 static int __devinit iop_adma_probe(struct platform_device *pdev)
1459 {
1460         struct resource *res;
1461         int ret = 0, i;
1462         struct iop_adma_device *adev;
1463         struct iop_adma_chan *iop_chan;
1464         struct dma_device *dma_dev;
1465         struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;
1466
1467         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1468         if (!res)
1469                 return -ENODEV;
1470
1471         if (!devm_request_mem_region(&pdev->dev, res->start,
1472                                 resource_size(res), pdev->name))
1473                 return -EBUSY;
1474
1475         adev = kzalloc(sizeof(*adev), GFP_KERNEL);
1476         if (!adev)
1477                 return -ENOMEM;
1478         dma_dev = &adev->common;
1479
1480         /* allocate coherent memory for hardware descriptors
1481          * note: writecombine gives slightly better performance, but
1482          * requires that we explicitly flush the writes
1483          */
1484         adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1485                                         plat_data->pool_size,
1486                                         &adev->dma_desc_pool, GFP_KERNEL);
1487         if (!adev->dma_desc_pool_virt) {
1488                 ret = -ENOMEM;
1489                 goto err_free_adev;
1490         }
1491
1492         dev_dbg(&pdev->dev, "%s: allocted descriptor pool virt %p phys %p\n",
1493                 __func__, adev->dma_desc_pool_virt,
1494                 (void *) adev->dma_desc_pool);
1495
1496         adev->id = plat_data->hw_id;
1497
1498         /* discover transaction capabilities from the platform data */
1499         dma_dev->cap_mask = plat_data->cap_mask;
1500
1501         adev->pdev = pdev;
1502         platform_set_drvdata(pdev, adev);
1503
1504         INIT_LIST_HEAD(&dma_dev->channels);
1505
1506         /* set base routines */
1507         dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
1508         dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
1509         dma_dev->device_is_tx_complete = iop_adma_is_complete;
1510         dma_dev->device_issue_pending = iop_adma_issue_pending;
1511         dma_dev->dev = &pdev->dev;
1512
1513         /* set prep routines based on capability */
1514         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1515                 dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
1516         if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1517                 dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
1518         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1519                 dma_dev->max_xor = iop_adma_get_max_xor();
1520                 dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
1521         }
1522         if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
1523                 dma_dev->device_prep_dma_xor_val =
1524                         iop_adma_prep_dma_xor_val;
1525         if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1526                 dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
1527                 dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;
1528         }
1529         if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask))
1530                 dma_dev->device_prep_dma_pq_val =
1531                         iop_adma_prep_dma_pq_val;
1532         if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1533                 dma_dev->device_prep_dma_interrupt =
1534                         iop_adma_prep_dma_interrupt;
1535
1536         iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
1537         if (!iop_chan) {
1538                 ret = -ENOMEM;
1539                 goto err_free_dma;
1540         }
1541         iop_chan->device = adev;
1542
1543         iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1544                                         resource_size(res));
1545         if (!iop_chan->mmr_base) {
1546                 ret = -ENOMEM;
1547                 goto err_free_iop_chan;
1548         }
1549         tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet,
1550                      (unsigned long) iop_chan);
1551
1552         /* clear errors before enabling interrupts */
1553         iop_adma_device_clear_err_status(iop_chan);
1554
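        /* wire up the three ADMA interrupt sources: end-of-transfer
         * (eot), end-of-chain (eoc), and error (err)
         */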
1555         for (i = 0; i < 3; i++) {
1556                 irq_handler_t handler[] = { iop_adma_eot_handler,
1557                                         iop_adma_eoc_handler,
1558                                         iop_adma_err_handler };
1559                 int irq = platform_get_irq(pdev, i);
1560                 if (irq < 0) {
1561                         ret = -ENXIO;
1562                         goto err_free_iop_chan;
1563                 } else {
1564                         ret = devm_request_irq(&pdev->dev, irq,
1565                                         handler[i], 0, pdev->name, iop_chan);
1566                         if (ret)
1567                                 goto err_free_iop_chan;
1568                 }
1569         }
1570
1571         spin_lock_init(&iop_chan->lock);
1572         INIT_LIST_HEAD(&iop_chan->chain);
1573         INIT_LIST_HEAD(&iop_chan->all_slots);
1574         iop_chan->common.device = dma_dev;
1575         list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1576
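        /* run the capability-specific self tests; a failing engine is
         * torn down here rather than registered with dmaengine
         */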
1577         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1578                 ret = iop_adma_memcpy_self_test(adev);
1579                 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1580                 if (ret)
1581                         goto err_free_iop_chan;
1582         }
1583
1584         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
1585             dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
1586                 ret = iop_adma_xor_val_self_test(adev);
1587                 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1588                 if (ret)
1589                         goto err_free_iop_chan;
1590         }
1591
1592         if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) &&
1593             dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) {
1594                 #ifdef CONFIG_MD_RAID6_PQ
1595                 ret = iop_adma_pq_zero_sum_self_test(adev);
1596                 dev_dbg(&pdev->dev, "pq self test returned %d\n", ret);
1597                 #else
1598                 /* cannot test raid6, so do not publish capability */
1599                 dma_cap_clear(DMA_PQ, dma_dev->cap_mask);
1600                 dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask);
1601                 ret = 0;
1602                 #endif
1603                 if (ret)
1604                         goto err_free_iop_chan;
1605         }
1606
1607         dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
1608           "( %s%s%s%s%s%s%s%s%s%s)\n",
1609           dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "",
1610           dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "",
1611           dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
1612           dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1613           dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "",
1614           dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
1615           dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)  ? "fill " : "",
1616           dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "",
1617           dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1618           dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1619
1620         dma_async_device_register(dma_dev);
1621         goto out;
1622
1623  err_free_iop_chan:
1624         kfree(iop_chan);
1625  err_free_dma:
1626         dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1627                         adev->dma_desc_pool_virt, adev->dma_desc_pool);
1628  err_free_adev:
1629         kfree(adev);
1630  out:
1631         return ret;
1632 }
1633
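/*
 * Kick-start the channel with a zero-length "null" memcpy descriptor:
 * it gives the hardware a valid chain head and a known-good cookie
 * without transferring any data.
 */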
1634 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1635 {
1636         struct iop_adma_desc_slot *sw_desc, *grp_start;
1637         dma_cookie_t cookie;
1638         int slot_cnt, slots_per_op;
1639
1640         dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1641
1642         spin_lock_bh(&iop_chan->lock);
1643         slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
1644         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1645         if (sw_desc) {
1646                 grp_start = sw_desc->group_head;
1647
1648                 list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
1649                 async_tx_ack(&sw_desc->async_tx);
1650                 iop_desc_init_memcpy(grp_start, 0);
1651                 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1652                 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1653                 iop_desc_set_memcpy_src_addr(grp_start, 0);
1654
1655                 cookie = iop_chan->common.cookie;
1656                 cookie++;
1657                 if (cookie <= 1)
1658                         cookie = 2;
1659
1660                 /* initialize the completed cookie to be less than
1661                  * the most recently used cookie
1662                  */
1663                 iop_chan->completed_cookie = cookie - 1;
1664                 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1665
1666                 /* channel should not be busy */
1667                 BUG_ON(iop_chan_is_busy(iop_chan));
1668
1669                 /* clear any prior error-status bits */
1670                 iop_adma_device_clear_err_status(iop_chan);
1671
1672                 /* disable operation */
1673                 iop_chan_disable(iop_chan);
1674
1675                 /* set the descriptor address */
1676                 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1677
1678                 /* 1/ don't add pre-chained descriptors
1679                  * 2/ dummy read to flush next_desc write
1680                  */
1681                 BUG_ON(iop_desc_get_next_desc(sw_desc));
1682
1683                 /* run the descriptor */
1684                 iop_chan_enable(iop_chan);
1685         } else
1686                 dev_printk(KERN_ERR, iop_chan->device->common.dev,
1687                          "failed to allocate null descriptor\n");
1688         spin_unlock_bh(&iop_chan->lock);
1689 }
1690
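/*
 * Same idea as iop_chan_start_null_memcpy() above, but built from a
 * zero-length two-source null XOR descriptor.
 */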
1691 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1692 {
1693         struct iop_adma_desc_slot *sw_desc, *grp_start;
1694         dma_cookie_t cookie;
1695         int slot_cnt, slots_per_op;
1696
1697         dev_dbg(iop_chan->device->common.dev, "%s\n", __func__);
1698
1699         spin_lock_bh(&iop_chan->lock);
1700         slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
1701         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1702         if (sw_desc) {
1703                 grp_start = sw_desc->group_head;
1704                 list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
1705                 async_tx_ack(&sw_desc->async_tx);
1706                 iop_desc_init_null_xor(grp_start, 2, 0);
1707                 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1708                 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1709                 iop_desc_set_xor_src_addr(grp_start, 0, 0);
1710                 iop_desc_set_xor_src_addr(grp_start, 1, 0);
1711
1712                 cookie = iop_chan->common.cookie;
1713                 cookie++;
1714                 if (cookie <= 1)
1715                         cookie = 2;
1716
1717                 /* initialize the completed cookie to be less than
1718                  * the most recently used cookie
1719                  */
1720                 iop_chan->completed_cookie = cookie - 1;
1721                 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1722
1723                 /* channel should not be busy */
1724                 BUG_ON(iop_chan_is_busy(iop_chan));
1725
1726                 /* clear any prior error-status bits */
1727                 iop_adma_device_clear_err_status(iop_chan);
1728
1729                 /* disable operation */
1730                 iop_chan_disable(iop_chan);
1731
1732                 /* set the descriptor address */
1733                 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1734
1735                 /* 1/ don't add pre-chained descriptors
1736                  * 2/ dummy read to flush next_desc write
1737                  */
1738                 BUG_ON(iop_desc_get_next_desc(sw_desc));
1739
1740                 /* run the descriptor */
1741                 iop_chan_enable(iop_chan);
1742         } else
1743                 dev_printk(KERN_ERR, iop_chan->device->common.dev,
1744                         "failed to allocate null descriptor\n");
1745         spin_unlock_bh(&iop_chan->lock);
1746 }
1747
1748 MODULE_ALIAS("platform:iop-adma");
1749
1750 static struct platform_driver iop_adma_driver = {
1751         .probe          = iop_adma_probe,
1752         .remove         = __devexit_p(iop_adma_remove),
1753         .driver         = {
1754                 .owner  = THIS_MODULE,
1755                 .name   = "iop-adma",
1756         },
1757 };
1758
1759 static int __init iop_adma_init(void)
1760 {
1761         return platform_driver_register(&iop_adma_driver);
1762 }
1763
1764 static void __exit iop_adma_exit(void)
1765 {
1766         platform_driver_unregister(&iop_adma_driver);
1767         return;
1768 }
1769 module_exit(iop_adma_exit);
1770 module_init(iop_adma_init);
1771
1772 MODULE_AUTHOR("Intel Corporation");
1773 MODULE_DESCRIPTION("IOP ADMA Engine Driver");
1774 MODULE_LICENSE("GPL");