[pandora-kernel.git] / drivers / dma / iop-adma.c
1 /*
2  * offload engine driver for the Intel XScale series of I/O processors
3  * Copyright © 2006, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17  *
18  */
19
20 /*
21  * This driver supports the asynchronous DMA copy and RAID engines available
22  * on the Intel XScale(R) family of I/O Processors (IOP 32x, 33x, 134x)
23  */
24
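/*
 * Typical client usage (a rough sketch, not an additional API defined by
 * this file): map the source and destination buffers with the DMA API,
 * build a descriptor with one of the device_prep_dma_* hooks registered
 * below, submit it via tx->tx_submit(), kick the hardware with
 * device_issue_pending(), and poll device_is_tx_complete() for
 * completion.  iop_adma_memcpy_self_test() further down in this file is
 * an in-tree, end-to-end example of exactly this sequence.
 */
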
25 #include <linux/init.h>
26 #include <linux/module.h>
27 #include <linux/async_tx.h>
28 #include <linux/delay.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/spinlock.h>
31 #include <linux/interrupt.h>
32 #include <linux/platform_device.h>
33 #include <linux/memory.h>
34 #include <linux/ioport.h>
35
36 #include <asm/arch/adma.h>
37
38 #define to_iop_adma_chan(chan) container_of(chan, struct iop_adma_chan, common)
39 #define to_iop_adma_device(dev) \
40         container_of(dev, struct iop_adma_device, common)
41 #define tx_to_iop_adma_slot(tx) \
42         container_of(tx, struct iop_adma_desc_slot, async_tx)
43
44 /**
45  * iop_adma_free_slots - flags descriptor slots for reuse
46  * @slot: Slot to free
47  * Caller must hold &iop_chan->lock while calling this function
48  */
49 static void iop_adma_free_slots(struct iop_adma_desc_slot *slot)
50 {
51         int stride = slot->slots_per_op;
52
53         while (stride--) {
54                 slot->slots_per_op = 0;
55                 slot = list_entry(slot->slot_node.next,
56                                 struct iop_adma_desc_slot,
57                                 slot_node);
58         }
59 }
60
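/*
 * iop_adma_run_tx_complete_actions - completion side of a descriptor.
 * If the descriptor carries a valid cookie, invoke the client callback,
 * unmap the destination and source buffers recorded at prep time, then
 * kick any transactions that were waiting on this one.  Returns the
 * newest completed cookie seen so far.
 */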
61 static dma_cookie_t
62 iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc,
63         struct iop_adma_chan *iop_chan, dma_cookie_t cookie)
64 {
65         BUG_ON(desc->async_tx.cookie < 0);
66         spin_lock_bh(&desc->async_tx.lock);
67         if (desc->async_tx.cookie > 0) {
68                 cookie = desc->async_tx.cookie;
69                 desc->async_tx.cookie = 0;
70
71                 /* call the callback (must not sleep or submit new
72                  * operations to this channel)
73                  */
74                 if (desc->async_tx.callback)
75                         desc->async_tx.callback(
76                                 desc->async_tx.callback_param);
77
78                 /* unmap dma addresses
79                  * (unmap_single vs unmap_page?)
80                  */
81                 if (desc->group_head && desc->unmap_len) {
82                         struct iop_adma_desc_slot *unmap = desc->group_head;
83                         struct device *dev =
84                                 &iop_chan->device->pdev->dev;
85                         u32 len = unmap->unmap_len;
86                         u32 src_cnt = unmap->unmap_src_cnt;
87                         dma_addr_t addr = iop_desc_get_dest_addr(unmap,
88                                 iop_chan);
89
90                         dma_unmap_page(dev, addr, len, DMA_FROM_DEVICE);
91                         while (src_cnt--) {
92                                 addr = iop_desc_get_src_addr(unmap,
93                                                         iop_chan,
94                                                         src_cnt);
95                                 dma_unmap_page(dev, addr, len,
96                                         DMA_TO_DEVICE);
97                         }
98                         desc->group_head = NULL;
99                 }
100         }
101
102         /* run dependent operations */
103         async_tx_run_dependencies(&desc->async_tx);
104         spin_unlock_bh(&desc->async_tx.lock);
105
106         return cookie;
107 }
108
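/*
 * iop_adma_clean_slot - try to recycle a completed descriptor.
 * Returns 1 (and leaves the slot alone) when the descriptor is the
 * current chain tail, since new submissions are appended to it.  Slots
 * the client has not yet acked are also left in place.  Everything else
 * is unlinked from the chain and its slots are marked free.
 */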
109 static int
110 iop_adma_clean_slot(struct iop_adma_desc_slot *desc,
111         struct iop_adma_chan *iop_chan)
112 {
113         /* the client is allowed to attach dependent operations
114          * until 'ack' is set
115          */
116         if (!desc->async_tx.ack)
117                 return 0;
118
119         /* leave the last descriptor in the chain
120          * so we can append to it
121          */
122         if (desc->chain_node.next == &iop_chan->chain)
123                 return 1;
124
125         dev_dbg(iop_chan->device->common.dev,
126                 "\tfree slot: %d slots_per_op: %d\n",
127                 desc->idx, desc->slots_per_op);
128
129         list_del(&desc->chain_node);
130         iop_adma_free_slots(desc);
131
132         return 0;
133 }
134
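/*
 * __iop_adma_slot_cleanup - reap completed descriptors.
 * Walk the channel's chain from the oldest descriptor up to (but not
 * past) the descriptor currently loaded in the hardware, collecting
 * zero-sum results, running completion actions and recycling slots.
 * Group transactions are only reaped once every member has completed.
 */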
135 static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
136 {
137         struct iop_adma_desc_slot *iter, *_iter, *grp_start = NULL;
138         dma_cookie_t cookie = 0;
139         u32 current_desc = iop_chan_get_current_descriptor(iop_chan);
140         int busy = iop_chan_is_busy(iop_chan);
141         int seen_current = 0, slot_cnt = 0, slots_per_op = 0;
142
143         dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
144         /* free completed slots from the chain starting with
145          * the oldest descriptor
146          */
147         list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
148                                         chain_node) {
149                 pr_debug("\tcookie: %d slot: %d busy: %d "
150                         "this_desc: %#x next_desc: %#x ack: %d\n",
151                         iter->async_tx.cookie, iter->idx, busy,
152                         iter->async_tx.phys, iop_desc_get_next_desc(iter),
153                         iter->async_tx.ack);
154                 prefetch(_iter);
155                 prefetch(&_iter->async_tx);
156
157                 /* do not advance past the current descriptor loaded into the
158                  * hardware channel, subsequent descriptors are either in
159                  * process or have not been submitted
160                  */
161                 if (seen_current)
162                         break;
163
164                 /* stop the search if we reach the current descriptor and the
165                  * channel is busy, or if it appears that the current descriptor
166                  * needs to be re-read (i.e. has been appended to)
167                  */
168                 if (iter->async_tx.phys == current_desc) {
169                         BUG_ON(seen_current++);
170                         if (busy || iop_desc_get_next_desc(iter))
171                                 break;
172                 }
173
174                 /* detect the start of a group transaction */
175                 if (!slot_cnt && !slots_per_op) {
176                         slot_cnt = iter->slot_cnt;
177                         slots_per_op = iter->slots_per_op;
178                         if (slot_cnt <= slots_per_op) {
179                                 slot_cnt = 0;
180                                 slots_per_op = 0;
181                         }
182                 }
183
184                 if (slot_cnt) {
185                         pr_debug("\tgroup++\n");
186                         if (!grp_start)
187                                 grp_start = iter;
188                         slot_cnt -= slots_per_op;
189                 }
190
191                 /* all the members of a group are complete */
192                 if (slots_per_op != 0 && slot_cnt == 0) {
193                         struct iop_adma_desc_slot *grp_iter, *_grp_iter;
194                         int end_of_chain = 0;
195                         pr_debug("\tgroup end\n");
196
197                         /* collect the total results */
198                         if (grp_start->xor_check_result) {
199                                 u32 zero_sum_result = 0;
200                                 slot_cnt = grp_start->slot_cnt;
201                                 grp_iter = grp_start;
202
203                                 list_for_each_entry_from(grp_iter,
204                                         &iop_chan->chain, chain_node) {
205                                         zero_sum_result |=
206                                             iop_desc_get_zero_result(grp_iter);
207                                         pr_debug("\titer%d result: %d\n",
208                                                  grp_iter->idx, zero_sum_result);
209                                         slot_cnt -= slots_per_op;
210                                         if (slot_cnt == 0)
211                                                 break;
212                                 }
213                                 pr_debug("\tgrp_start->xor_check_result: %p\n",
214                                         grp_start->xor_check_result);
215                                 *grp_start->xor_check_result = zero_sum_result;
216                         }
217
218                         /* clean up the group */
219                         slot_cnt = grp_start->slot_cnt;
220                         grp_iter = grp_start;
221                         list_for_each_entry_safe_from(grp_iter, _grp_iter,
222                                 &iop_chan->chain, chain_node) {
223                                 cookie = iop_adma_run_tx_complete_actions(
224                                         grp_iter, iop_chan, cookie);
225
226                                 slot_cnt -= slots_per_op;
227                                 end_of_chain = iop_adma_clean_slot(grp_iter,
228                                         iop_chan);
229
230                                 if (slot_cnt == 0 || end_of_chain)
231                                         break;
232                         }
233
234                         /* the group should be complete at this point */
235                         BUG_ON(slot_cnt);
236
237                         slots_per_op = 0;
238                         grp_start = NULL;
239                         if (end_of_chain)
240                                 break;
241                         else
242                                 continue;
243                 } else if (slots_per_op) /* wait for group completion */
244                         continue;
245
246                 /* write back zero sum results (single descriptor case) */
247                 if (iter->xor_check_result && iter->async_tx.cookie)
248                         *iter->xor_check_result =
249                                 iop_desc_get_zero_result(iter);
250
251                 cookie = iop_adma_run_tx_complete_actions(
252                                         iter, iop_chan, cookie);
253
254                 if (iop_adma_clean_slot(iter, iop_chan))
255                         break;
256         }
257
258         BUG_ON(!seen_current);
259
260         iop_chan_idle(busy, iop_chan);
261
262         if (cookie > 0) {
263                 iop_chan->completed_cookie = cookie;
264                 pr_debug("\tcompleted cookie %d\n", cookie);
265         }
266 }
267
268 static void
269 iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
270 {
271         spin_lock_bh(&iop_chan->lock);
272         __iop_adma_slot_cleanup(iop_chan);
273         spin_unlock_bh(&iop_chan->lock);
274 }
275
276 static void iop_adma_tasklet(unsigned long data)
277 {
278         struct iop_adma_chan *chan = (struct iop_adma_chan *) data;
279         __iop_adma_slot_cleanup(chan);
280 }
281
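/*
 * iop_adma_alloc_slots - carve out a contiguous run of descriptor slots.
 * Scan all_slots for num_slots free, suitably aligned slots, starting at
 * the last allocation and wrapping to the head of the list once.  On
 * success the slots are threaded onto a private tx_list and the group
 * tail is returned; on failure the cleanup tasklet is scheduled (to
 * recycle completed slots) and NULL is returned.
 */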
282 static struct iop_adma_desc_slot *
283 iop_adma_alloc_slots(struct iop_adma_chan *iop_chan, int num_slots,
284                         int slots_per_op)
285 {
286         struct iop_adma_desc_slot *iter, *_iter, *alloc_start = NULL;
287         LIST_HEAD(chain);
288         int slots_found, retry = 0;
289
290         /* start search from the last allocated descriptor;
291          * if a contiguous allocation cannot be found, start searching
292          * from the beginning of the list
293          */
294 retry:
295         slots_found = 0;
296         if (retry == 0)
297                 iter = iop_chan->last_used;
298         else
299                 iter = list_entry(&iop_chan->all_slots,
300                         struct iop_adma_desc_slot,
301                         slot_node);
302
303         list_for_each_entry_safe_continue(
304                 iter, _iter, &iop_chan->all_slots, slot_node) {
305                 prefetch(_iter);
306                 prefetch(&_iter->async_tx);
307                 if (iter->slots_per_op) {
308                         /* give up after finding the first busy slot
309                          * on the second pass through the list
310                          */
311                         if (retry)
312                                 break;
313
314                         slots_found = 0;
315                         continue;
316                 }
317
318                 /* start the allocation if the slot is correctly aligned */
319                 if (!slots_found++) {
320                         if (iop_desc_is_aligned(iter, slots_per_op))
321                                 alloc_start = iter;
322                         else {
323                                 slots_found = 0;
324                                 continue;
325                         }
326                 }
327
328                 if (slots_found == num_slots) {
329                         struct iop_adma_desc_slot *alloc_tail = NULL;
330                         struct iop_adma_desc_slot *last_used = NULL;
331                         iter = alloc_start;
332                         while (num_slots) {
333                                 int i;
334                                 dev_dbg(iop_chan->device->common.dev,
335                                         "allocated slot: %d "
336                                         "(desc %p phys: %#x) slots_per_op %d\n",
337                                         iter->idx, iter->hw_desc,
338                                         iter->async_tx.phys, slots_per_op);
339
340                                 /* pre-ack all but the last descriptor */
341                                 if (num_slots != slots_per_op)
342                                         iter->async_tx.ack = 1;
343                                 else
344                                         iter->async_tx.ack = 0;
345
346                                 list_add_tail(&iter->chain_node, &chain);
347                                 alloc_tail = iter;
348                                 iter->async_tx.cookie = 0;
349                                 iter->slot_cnt = num_slots;
350                                 iter->xor_check_result = NULL;
351                                 for (i = 0; i < slots_per_op; i++) {
352                                         iter->slots_per_op = slots_per_op - i;
353                                         last_used = iter;
354                                         iter = list_entry(iter->slot_node.next,
355                                                 struct iop_adma_desc_slot,
356                                                 slot_node);
357                                 }
358                                 num_slots -= slots_per_op;
359                         }
360                         alloc_tail->group_head = alloc_start;
361                         alloc_tail->async_tx.cookie = -EBUSY;
362                         list_splice(&chain, &alloc_tail->async_tx.tx_list);
363                         iop_chan->last_used = last_used;
364                         iop_desc_clear_next_desc(alloc_start);
365                         iop_desc_clear_next_desc(alloc_tail);
366                         return alloc_tail;
367                 }
368         }
369         if (!retry++)
370                 goto retry;
371
372         /* try to free some slots if the allocation fails */
373         tasklet_schedule(&iop_chan->irq_tasklet);
374
375         return NULL;
376 }
377
378 static dma_cookie_t
379 iop_desc_assign_cookie(struct iop_adma_chan *iop_chan,
380         struct iop_adma_desc_slot *desc)
381 {
382         dma_cookie_t cookie = iop_chan->common.cookie;
383         cookie++;
384         if (cookie < 0)
385                 cookie = 1;
386         iop_chan->common.cookie = desc->async_tx.cookie = cookie;
387         return cookie;
388 }
389
390 static void iop_adma_check_threshold(struct iop_adma_chan *iop_chan)
391 {
392         dev_dbg(iop_chan->device->common.dev, "pending: %d\n",
393                 iop_chan->pending);
394
395         if (iop_chan->pending >= IOP_ADMA_THRESHOLD) {
396                 iop_chan->pending = 0;
397                 iop_chan_append(iop_chan);
398         }
399 }
400
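/*
 * iop_adma_tx_submit - queue a prepared descriptor on the channel.
 * Assigns a cookie, splices the descriptor group onto the software
 * chain, links the old hardware chain tail to the new group's first
 * descriptor, and appends to the engine once the pending count crosses
 * IOP_ADMA_THRESHOLD.
 */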
401 static dma_cookie_t
402 iop_adma_tx_submit(struct dma_async_tx_descriptor *tx)
403 {
404         struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx);
405         struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan);
406         struct iop_adma_desc_slot *grp_start, *old_chain_tail;
407         int slot_cnt;
408         int slots_per_op;
409         dma_cookie_t cookie;
410
411         grp_start = sw_desc->group_head;
412         slot_cnt = grp_start->slot_cnt;
413         slots_per_op = grp_start->slots_per_op;
414
415         spin_lock_bh(&iop_chan->lock);
416         cookie = iop_desc_assign_cookie(iop_chan, sw_desc);
417
418         old_chain_tail = list_entry(iop_chan->chain.prev,
419                 struct iop_adma_desc_slot, chain_node);
420         list_splice_init(&sw_desc->async_tx.tx_list,
421                          &old_chain_tail->chain_node);
422
423         /* fix up the hardware chain */
424         iop_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
425
426         /* 1/ don't add pre-chained descriptors
427          * 2/ dummy read to flush next_desc write
428          */
429         BUG_ON(iop_desc_get_next_desc(sw_desc));
430
431         /* increment the pending count by the number of slots;
432          * memcpy operations have a 1:1 (slot:operation) relation,
433          * other operations are heavier and will pop the threshold
434          * more often.
435          */
436         iop_chan->pending += slot_cnt;
437         iop_adma_check_threshold(iop_chan);
438         spin_unlock_bh(&iop_chan->lock);
439
440         dev_dbg(iop_chan->device->common.dev, "%s cookie: %d slot: %d\n",
441                 __FUNCTION__, sw_desc->async_tx.cookie, sw_desc->idx);
442
443         return cookie;
444 }
445
446 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan);
447 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan);
448
449 /* returns the number of allocated descriptors */
450 static int iop_adma_alloc_chan_resources(struct dma_chan *chan)
451 {
452         char *hw_desc;
453         int idx;
454         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
455         struct iop_adma_desc_slot *slot = NULL;
456         int init = iop_chan->slots_allocated ? 0 : 1;
457         struct iop_adma_platform_data *plat_data =
458                 iop_chan->device->pdev->dev.platform_data;
459         int num_descs_in_pool = plat_data->pool_size/IOP_ADMA_SLOT_SIZE;
460
461         /* Allocate descriptor slots */
462         do {
463                 idx = iop_chan->slots_allocated;
464                 if (idx == num_descs_in_pool)
465                         break;
466
467                 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
468                 if (!slot) {
469                         printk(KERN_INFO "IOP ADMA Channel only initialized"
470                                 " %d descriptor slots\n", idx);
471                         break;
472                 }
473                 hw_desc = (char *) iop_chan->device->dma_desc_pool_virt;
474                 slot->hw_desc = (void *) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
475
476                 dma_async_tx_descriptor_init(&slot->async_tx, chan);
477                 slot->async_tx.tx_submit = iop_adma_tx_submit;
478                 INIT_LIST_HEAD(&slot->chain_node);
479                 INIT_LIST_HEAD(&slot->slot_node);
480                 INIT_LIST_HEAD(&slot->async_tx.tx_list);
481                 hw_desc = (char *) iop_chan->device->dma_desc_pool;
482                 slot->async_tx.phys =
483                         (dma_addr_t) &hw_desc[idx * IOP_ADMA_SLOT_SIZE];
484                 slot->idx = idx;
485
486                 spin_lock_bh(&iop_chan->lock);
487                 iop_chan->slots_allocated++;
488                 list_add_tail(&slot->slot_node, &iop_chan->all_slots);
489                 spin_unlock_bh(&iop_chan->lock);
490         } while (iop_chan->slots_allocated < num_descs_in_pool);
491
492         if (idx && !iop_chan->last_used)
493                 iop_chan->last_used = list_entry(iop_chan->all_slots.next,
494                                         struct iop_adma_desc_slot,
495                                         slot_node);
496
497         dev_dbg(iop_chan->device->common.dev,
498                 "allocated %d descriptor slots last_used: %p\n",
499                 iop_chan->slots_allocated, iop_chan->last_used);
500
501         /* initialize the channel and the chain with a null operation */
502         if (init) {
503                 if (dma_has_cap(DMA_MEMCPY,
504                         iop_chan->device->common.cap_mask))
505                         iop_chan_start_null_memcpy(iop_chan);
506                 else if (dma_has_cap(DMA_XOR,
507                         iop_chan->device->common.cap_mask))
508                         iop_chan_start_null_xor(iop_chan);
509                 else
510                         BUG();
511         }
512
513         return (idx > 0) ? idx : -ENOMEM;
514 }
515
516 static struct dma_async_tx_descriptor *
517 iop_adma_prep_dma_interrupt(struct dma_chan *chan)
518 {
519         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
520         struct iop_adma_desc_slot *sw_desc, *grp_start;
521         int slot_cnt, slots_per_op;
522
523         dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
524
525         spin_lock_bh(&iop_chan->lock);
526         slot_cnt = iop_chan_interrupt_slot_count(&slots_per_op, iop_chan);
527         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
528         if (sw_desc) {
529                 grp_start = sw_desc->group_head;
530                 iop_desc_init_interrupt(grp_start, iop_chan);
531                 grp_start->unmap_len = 0;
532         }
533         spin_unlock_bh(&iop_chan->lock);
534
535         return sw_desc ? &sw_desc->async_tx : NULL;
536 }
537
538 static struct dma_async_tx_descriptor *
539 iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
540                          dma_addr_t dma_src, size_t len, unsigned long flags)
541 {
542         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
543         struct iop_adma_desc_slot *sw_desc, *grp_start;
544         int slot_cnt, slots_per_op;
545
546         if (unlikely(!len))
547                 return NULL;
548         BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
549
550         dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
551                 __FUNCTION__, len);
552
553         spin_lock_bh(&iop_chan->lock);
554         slot_cnt = iop_chan_memcpy_slot_count(len, &slots_per_op);
555         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
556         if (sw_desc) {
557                 grp_start = sw_desc->group_head;
558                 iop_desc_init_memcpy(grp_start, flags);
559                 iop_desc_set_byte_count(grp_start, iop_chan, len);
560                 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
561                 iop_desc_set_memcpy_src_addr(grp_start, dma_src);
562                 sw_desc->unmap_src_cnt = 1;
563                 sw_desc->unmap_len = len;
564         }
565         spin_unlock_bh(&iop_chan->lock);
566
567         return sw_desc ? &sw_desc->async_tx : NULL;
568 }
569
570 static struct dma_async_tx_descriptor *
571 iop_adma_prep_dma_memset(struct dma_chan *chan, dma_addr_t dma_dest,
572                          int value, size_t len, unsigned long flags)
573 {
574         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
575         struct iop_adma_desc_slot *sw_desc, *grp_start;
576         int slot_cnt, slots_per_op;
577
578         if (unlikely(!len))
579                 return NULL;
580         BUG_ON(unlikely(len > IOP_ADMA_MAX_BYTE_COUNT));
581
582         dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
583                 __FUNCTION__, len);
584
585         spin_lock_bh(&iop_chan->lock);
586         slot_cnt = iop_chan_memset_slot_count(len, &slots_per_op);
587         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
588         if (sw_desc) {
589                 grp_start = sw_desc->group_head;
590                 iop_desc_init_memset(grp_start, flags);
591                 iop_desc_set_byte_count(grp_start, iop_chan, len);
592                 iop_desc_set_block_fill_val(grp_start, value);
593                 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
594                 sw_desc->unmap_src_cnt = 1;
595                 sw_desc->unmap_len = len;
596         }
597         spin_unlock_bh(&iop_chan->lock);
598
599         return sw_desc ? &sw_desc->async_tx : NULL;
600 }
601
602 static struct dma_async_tx_descriptor *
603 iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
604                       dma_addr_t *dma_src, unsigned int src_cnt, size_t len,
605                       unsigned long flags)
606 {
607         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
608         struct iop_adma_desc_slot *sw_desc, *grp_start;
609         int slot_cnt, slots_per_op;
610
611         if (unlikely(!len))
612                 return NULL;
613         BUG_ON(unlikely(len > IOP_ADMA_XOR_MAX_BYTE_COUNT));
614
615         dev_dbg(iop_chan->device->common.dev,
616                 "%s src_cnt: %d len: %u flags: %lx\n",
617                 __FUNCTION__, src_cnt, len, flags);
618
619         spin_lock_bh(&iop_chan->lock);
620         slot_cnt = iop_chan_xor_slot_count(len, src_cnt, &slots_per_op);
621         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
622         if (sw_desc) {
623                 grp_start = sw_desc->group_head;
624                 iop_desc_init_xor(grp_start, src_cnt, flags);
625                 iop_desc_set_byte_count(grp_start, iop_chan, len);
626                 iop_desc_set_dest_addr(grp_start, iop_chan, dma_dest);
627                 sw_desc->unmap_src_cnt = src_cnt;
628                 sw_desc->unmap_len = len;
629                 while (src_cnt--)
630                         iop_desc_set_xor_src_addr(grp_start, src_cnt,
631                                                   dma_src[src_cnt]);
632         }
633         spin_unlock_bh(&iop_chan->lock);
634
635         return sw_desc ? &sw_desc->async_tx : NULL;
636 }
637
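/*
 * iop_adma_prep_dma_zero_sum - prepare an XOR parity-check descriptor.
 * The engine XORs src_cnt sources together without writing a
 * destination; the completion path stores the engine's verdict through
 * *result (0 means the blocks XOR to zero).
 */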
638 static struct dma_async_tx_descriptor *
639 iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
640                            unsigned int src_cnt, size_t len, u32 *result,
641                            unsigned long flags)
642 {
643         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
644         struct iop_adma_desc_slot *sw_desc, *grp_start;
645         int slot_cnt, slots_per_op;
646
647         if (unlikely(!len))
648                 return NULL;
649
650         dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
651                 __FUNCTION__, src_cnt, len);
652
653         spin_lock_bh(&iop_chan->lock);
654         slot_cnt = iop_chan_zero_sum_slot_count(len, src_cnt, &slots_per_op);
655         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
656         if (sw_desc) {
657                 grp_start = sw_desc->group_head;
658                 iop_desc_init_zero_sum(grp_start, src_cnt, flags);
659                 iop_desc_set_zero_sum_byte_count(grp_start, len);
660                 grp_start->xor_check_result = result;
661                 pr_debug("\t%s: grp_start->xor_check_result: %p\n",
662                         __FUNCTION__, grp_start->xor_check_result);
663                 sw_desc->unmap_src_cnt = src_cnt;
664                 sw_desc->unmap_len = len;
665                 while (src_cnt--)
666                         iop_desc_set_zero_sum_src_addr(grp_start, src_cnt,
667                                                        dma_src[src_cnt]);
668         }
669         spin_unlock_bh(&iop_chan->lock);
670
671         return sw_desc ? &sw_desc->async_tx : NULL;
672 }
673
674 static void iop_adma_dependency_added(struct dma_chan *chan)
675 {
676         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
677         tasklet_schedule(&iop_chan->irq_tasklet);
678 }
679
680 static void iop_adma_free_chan_resources(struct dma_chan *chan)
681 {
682         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
683         struct iop_adma_desc_slot *iter, *_iter;
684         int in_use_descs = 0;
685
686         iop_adma_slot_cleanup(iop_chan);
687
688         spin_lock_bh(&iop_chan->lock);
689         list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
690                                         chain_node) {
691                 in_use_descs++;
692                 list_del(&iter->chain_node);
693         }
694         list_for_each_entry_safe_reverse(
695                 iter, _iter, &iop_chan->all_slots, slot_node) {
696                 list_del(&iter->slot_node);
697                 kfree(iter);
698                 iop_chan->slots_allocated--;
699         }
700         iop_chan->last_used = NULL;
701
702         dev_dbg(iop_chan->device->common.dev, "%s slots_allocated %d\n",
703                 __FUNCTION__, iop_chan->slots_allocated);
704         spin_unlock_bh(&iop_chan->lock);
705
706         /* one is ok since we left it there on purpose */
707         if (in_use_descs > 1)
708                 printk(KERN_ERR "IOP: Freeing %d in use descriptors!\n",
709                         in_use_descs - 1);
710 }
711
712 /**
713  * iop_adma_is_complete - poll the status of an ADMA transaction
714  * @chan: ADMA channel handle
715  * @cookie: ADMA transaction identifier
716  */
717 static enum dma_status iop_adma_is_complete(struct dma_chan *chan,
718                                         dma_cookie_t cookie,
719                                         dma_cookie_t *done,
720                                         dma_cookie_t *used)
721 {
722         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
723         dma_cookie_t last_used;
724         dma_cookie_t last_complete;
725         enum dma_status ret;
726
727         last_used = chan->cookie;
728         last_complete = iop_chan->completed_cookie;
729
730         if (done)
731                 *done = last_complete;
732         if (used)
733                 *used = last_used;
734
735         ret = dma_async_is_complete(cookie, last_complete, last_used);
736         if (ret == DMA_SUCCESS)
737                 return ret;
738
739         iop_adma_slot_cleanup(iop_chan);
740
741         last_used = chan->cookie;
742         last_complete = iop_chan->completed_cookie;
743
744         if (done)
745                 *done = last_complete;
746         if (used)
747                 *used = last_used;
748
749         return dma_async_is_complete(cookie, last_complete, last_used);
750 }
751
752 static irqreturn_t iop_adma_eot_handler(int irq, void *data)
753 {
754         struct iop_adma_chan *chan = data;
755
756         dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
757
758         tasklet_schedule(&chan->irq_tasklet);
759
760         iop_adma_device_clear_eot_status(chan);
761
762         return IRQ_HANDLED;
763 }
764
765 static irqreturn_t iop_adma_eoc_handler(int irq, void *data)
766 {
767         struct iop_adma_chan *chan = data;
768
769         dev_dbg(chan->device->common.dev, "%s\n", __FUNCTION__);
770
771         tasklet_schedule(&chan->irq_tasklet);
772
773         iop_adma_device_clear_eoc_status(chan);
774
775         return IRQ_HANDLED;
776 }
777
778 static irqreturn_t iop_adma_err_handler(int irq, void *data)
779 {
780         struct iop_adma_chan *chan = data;
781         unsigned long status = iop_chan_get_status(chan);
782
783         dev_printk(KERN_ERR, chan->device->common.dev,
784                 "error ( %s%s%s%s%s%s%s)\n",
785                 iop_is_err_int_parity(status, chan) ? "int_parity " : "",
786                 iop_is_err_mcu_abort(status, chan) ? "mcu_abort " : "",
787                 iop_is_err_int_tabort(status, chan) ? "int_tabort " : "",
788                 iop_is_err_int_mabort(status, chan) ? "int_mabort " : "",
789                 iop_is_err_pci_tabort(status, chan) ? "pci_tabort " : "",
790                 iop_is_err_pci_mabort(status, chan) ? "pci_mabort " : "",
791                 iop_is_err_split_tx(status, chan) ? "split_tx " : "");
792
793         iop_adma_device_clear_err_status(chan);
794
795         BUG();
796
797         return IRQ_HANDLED;
798 }
799
800 static void iop_adma_issue_pending(struct dma_chan *chan)
801 {
802         struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
803
804         if (iop_chan->pending) {
805                 iop_chan->pending = 0;
806                 iop_chan_append(iop_chan);
807         }
808 }
809
810 /*
811  * Perform a transaction to verify the HW works.
812  */
813 #define IOP_ADMA_TEST_SIZE 2000
814
815 static int __devinit iop_adma_memcpy_self_test(struct iop_adma_device *device)
816 {
817         int i;
818         void *src, *dest;
819         dma_addr_t src_dma, dest_dma;
820         struct dma_chan *dma_chan;
821         dma_cookie_t cookie;
822         struct dma_async_tx_descriptor *tx;
823         int err = 0;
824         struct iop_adma_chan *iop_chan;
825
826         dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
827
828         src = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
829         if (!src)
830                 return -ENOMEM;
831         dest = kzalloc(sizeof(u8) * IOP_ADMA_TEST_SIZE, GFP_KERNEL);
832         if (!dest) {
833                 kfree(src);
834                 return -ENOMEM;
835         }
836
837         /* Fill in src buffer */
838         for (i = 0; i < IOP_ADMA_TEST_SIZE; i++)
839                 ((u8 *) src)[i] = (u8)i;
840
841         memset(dest, 0, IOP_ADMA_TEST_SIZE);
842
843         /* Start copy, using first DMA channel */
844         dma_chan = container_of(device->common.channels.next,
845                                 struct dma_chan,
846                                 device_node);
847         if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
848                 err = -ENODEV;
849                 goto out;
850         }
851
852         dest_dma = dma_map_single(dma_chan->device->dev, dest,
853                                 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
854         src_dma = dma_map_single(dma_chan->device->dev, src,
855                                 IOP_ADMA_TEST_SIZE, DMA_TO_DEVICE);
856         tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
857                                       IOP_ADMA_TEST_SIZE, 1);
858
859         cookie = iop_adma_tx_submit(tx);
860         iop_adma_issue_pending(dma_chan);
861         async_tx_ack(tx);
862         msleep(1);
863
864         if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
865                         DMA_SUCCESS) {
866                 dev_printk(KERN_ERR, dma_chan->device->dev,
867                         "Self-test copy timed out, disabling\n");
868                 err = -ENODEV;
869                 goto free_resources;
870         }
871
872         iop_chan = to_iop_adma_chan(dma_chan);
873         dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
874                 IOP_ADMA_TEST_SIZE, DMA_FROM_DEVICE);
875         if (memcmp(src, dest, IOP_ADMA_TEST_SIZE)) {
876                 dev_printk(KERN_ERR, dma_chan->device->dev,
877                         "Self-test copy failed compare, disabling\n");
878                 err = -ENODEV;
879                 goto free_resources;
880         }
881
882 free_resources:
883         iop_adma_free_chan_resources(dma_chan);
884 out:
885         kfree(src);
886         kfree(dest);
887         return err;
888 }
889
890 #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
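/*
 * Exercise the xor, zero-sum and memset paths against known patterns:
 * xor IOP_ADMA_NUM_SRC_TEST pages into a destination, zero-sum the
 * sources plus that destination (expecting parity zero), memset the
 * destination to zero, then zero-sum again expecting a non-zero result.
 */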
891 static int __devinit
892 iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
893 {
894         int i, src_idx;
895         struct page *dest;
896         struct page *xor_srcs[IOP_ADMA_NUM_SRC_TEST];
897         struct page *zero_sum_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
898         dma_addr_t dma_srcs[IOP_ADMA_NUM_SRC_TEST + 1];
899         dma_addr_t dma_addr, dest_dma;
900         struct dma_async_tx_descriptor *tx;
901         struct dma_chan *dma_chan;
902         dma_cookie_t cookie;
903         u8 cmp_byte = 0;
904         u32 cmp_word;
905         u32 zero_sum_result;
906         int err = 0;
907         struct iop_adma_chan *iop_chan;
908
909         dev_dbg(device->common.dev, "%s\n", __FUNCTION__);
910
911         for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
912                 xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
913                 if (!xor_srcs[src_idx]) {
914                         while (src_idx--)
915                                 __free_page(xor_srcs[src_idx]);
916                         return -ENOMEM;
917                 }
918         }
919
920         dest = alloc_page(GFP_KERNEL);
921         if (!dest) {
922                 while (src_idx--)
923                         __free_page(xor_srcs[src_idx]);
924                 return -ENOMEM;
925         }
926
927         /* Fill in src buffers */
928         for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++) {
929                 u8 *ptr = page_address(xor_srcs[src_idx]);
930                 for (i = 0; i < PAGE_SIZE; i++)
931                         ptr[i] = (1 << src_idx);
932         }
933
934         for (src_idx = 0; src_idx < IOP_ADMA_NUM_SRC_TEST; src_idx++)
935                 cmp_byte ^= (u8) (1 << src_idx);
936
937         cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
938                         (cmp_byte << 8) | cmp_byte;
939
940         memset(page_address(dest), 0, PAGE_SIZE);
941
942         dma_chan = container_of(device->common.channels.next,
943                                 struct dma_chan,
944                                 device_node);
945         if (iop_adma_alloc_chan_resources(dma_chan) < 1) {
946                 err = -ENODEV;
947                 goto out;
948         }
949
950         /* test xor */
951         dest_dma = dma_map_page(dma_chan->device->dev, dest, 0,
952                                 PAGE_SIZE, DMA_FROM_DEVICE);
953         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
954                 dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
955                                            0, PAGE_SIZE, DMA_TO_DEVICE);
956         tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
957                                    IOP_ADMA_NUM_SRC_TEST, PAGE_SIZE, 1);
958
959         cookie = iop_adma_tx_submit(tx);
960         iop_adma_issue_pending(dma_chan);
961         async_tx_ack(tx);
962         msleep(8);
963
964         if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) !=
965                 DMA_SUCCESS) {
966                 dev_printk(KERN_ERR, dma_chan->device->dev,
967                         "Self-test xor timed out, disabling\n");
968                 err = -ENODEV;
969                 goto free_resources;
970         }
971
972         iop_chan = to_iop_adma_chan(dma_chan);
973         dma_sync_single_for_cpu(&iop_chan->device->pdev->dev, dest_dma,
974                 PAGE_SIZE, DMA_FROM_DEVICE);
975         for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
976                 u32 *ptr = page_address(dest);
977                 if (ptr[i] != cmp_word) {
978                         dev_printk(KERN_ERR, dma_chan->device->dev,
979                                 "Self-test xor failed compare, disabling\n");
980                         err = -ENODEV;
981                         goto free_resources;
982                 }
983         }
984         dma_sync_single_for_device(&iop_chan->device->pdev->dev, dest_dma,
985                 PAGE_SIZE, DMA_TO_DEVICE);
986
987         /* skip zero sum if the capability is not present */
988         if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask))
989                 goto free_resources;
990
991         /* zero sum the sources with the destination page */
992         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++)
993                 zero_sum_srcs[i] = xor_srcs[i];
994         zero_sum_srcs[i] = dest;
995
996         zero_sum_result = 1;
997
998         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
999                 dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1000                                            zero_sum_srcs[i], 0, PAGE_SIZE,
1001                                            DMA_TO_DEVICE);
1002         tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
1003                                         IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1004                                         &zero_sum_result, 1);
1005
1006         cookie = iop_adma_tx_submit(tx);
1007         iop_adma_issue_pending(dma_chan);
1008         async_tx_ack(tx);
1009         msleep(8);
1010
1011         if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
1012                 dev_printk(KERN_ERR, dma_chan->device->dev,
1013                         "Self-test zero sum timed out, disabling\n");
1014                 err = -ENODEV;
1015                 goto free_resources;
1016         }
1017
1018         if (zero_sum_result != 0) {
1019                 dev_printk(KERN_ERR, dma_chan->device->dev,
1020                         "Self-test zero sum failed compare, disabling\n");
1021                 err = -ENODEV;
1022                 goto free_resources;
1023         }
1024
1025         /* test memset */
1026         dma_addr = dma_map_page(dma_chan->device->dev, dest, 0,
1027                         PAGE_SIZE, DMA_FROM_DEVICE);
1028         tx = iop_adma_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, 1);
1029
1030         cookie = iop_adma_tx_submit(tx);
1031         iop_adma_issue_pending(dma_chan);
1032         async_tx_ack(tx);
1033         msleep(8);
1034
1035         if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
1036                 dev_printk(KERN_ERR, dma_chan->device->dev,
1037                         "Self-test memset timed out, disabling\n");
1038                 err = -ENODEV;
1039                 goto free_resources;
1040         }
1041
1042         for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
1043                 u32 *ptr = page_address(dest);
1044                 if (ptr[i]) {
1045                         dev_printk(KERN_ERR, dma_chan->device->dev,
1046                                 "Self-test memset failed compare, disabling\n");
1047                         err = -ENODEV;
1048                         goto free_resources;
1049                 }
1050         }
1051
1052         /* test for non-zero parity sum */
1053         zero_sum_result = 0;
1054         for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 1; i++)
1055                 dma_srcs[i] = dma_map_page(dma_chan->device->dev,
1056                                            zero_sum_srcs[i], 0, PAGE_SIZE,
1057                                            DMA_TO_DEVICE);
1058         tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
1059                                         IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
1060                                         &zero_sum_result, 1);
1061
1062         cookie = iop_adma_tx_submit(tx);
1063         iop_adma_issue_pending(dma_chan);
1064         async_tx_ack(tx);
1065         msleep(8);
1066
1067         if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) {
1068                 dev_printk(KERN_ERR, dma_chan->device->dev,
1069                         "Self-test non-zero sum timed out, disabling\n");
1070                 err = -ENODEV;
1071                 goto free_resources;
1072         }
1073
1074         if (zero_sum_result != 1) {
1075                 dev_printk(KERN_ERR, dma_chan->device->dev,
1076                         "Self-test non-zero sum failed compare, disabling\n");
1077                 err = -ENODEV;
1078                 goto free_resources;
1079         }
1080
1081 free_resources:
1082         iop_adma_free_chan_resources(dma_chan);
1083 out:
1084         src_idx = IOP_ADMA_NUM_SRC_TEST;
1085         while (src_idx--)
1086                 __free_page(xor_srcs[src_idx]);
1087         __free_page(dest);
1088         return err;
1089 }
1090
1091 static int __devexit iop_adma_remove(struct platform_device *dev)
1092 {
1093         struct iop_adma_device *device = platform_get_drvdata(dev);
1094         struct dma_chan *chan, *_chan;
1095         struct iop_adma_chan *iop_chan;
1096         int i;
1097         struct iop_adma_platform_data *plat_data = dev->dev.platform_data;
1098
1099         dma_async_device_unregister(&device->common);
1100
1101         for (i = 0; i < 3; i++) {
1102                 unsigned int irq;
1103                 irq = platform_get_irq(dev, i);
1104                 free_irq(irq, device);
1105         }
1106
1107         dma_free_coherent(&dev->dev, plat_data->pool_size,
1108                         device->dma_desc_pool_virt, device->dma_desc_pool);
1109
1110         do {
1111                 struct resource *res;
1112                 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
1113                 release_mem_region(res->start, res->end - res->start);
1114         } while (0);
1115
1116         list_for_each_entry_safe(chan, _chan, &device->common.channels,
1117                                 device_node) {
1118                 iop_chan = to_iop_adma_chan(chan);
1119                 list_del(&chan->device_node);
1120                 kfree(iop_chan);
1121         }
1122         kfree(device);
1123
1124         return 0;
1125 }
1126
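/*
 * Probe order: claim the MMIO region, allocate the coherent descriptor
 * pool, advertise the capabilities from the platform data, hook up the
 * EOT/EOC/error interrupts, run the memcpy and xor/zero-sum self-tests
 * for the supported capabilities, and finally register the channel with
 * the dmaengine core.
 */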
1127 static int __devinit iop_adma_probe(struct platform_device *pdev)
1128 {
1129         struct resource *res;
1130         int ret = 0, i;
1131         struct iop_adma_device *adev;
1132         struct iop_adma_chan *iop_chan;
1133         struct dma_device *dma_dev;
1134         struct iop_adma_platform_data *plat_data = pdev->dev.platform_data;
1135
1136         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1137         if (!res)
1138                 return -ENODEV;
1139
1140         if (!devm_request_mem_region(&pdev->dev, res->start,
1141                                 res->end - res->start, pdev->name))
1142                 return -EBUSY;
1143
1144         adev = kzalloc(sizeof(*adev), GFP_KERNEL);
1145         if (!adev)
1146                 return -ENOMEM;
1147         dma_dev = &adev->common;
1148
1149         /* allocate coherent memory for hardware descriptors
1150          * note: writecombine gives slightly better performance, but
1151          * requires that we explicitly flush the writes
1152          */
1153         if ((adev->dma_desc_pool_virt = dma_alloc_writecombine(&pdev->dev,
1154                                         plat_data->pool_size,
1155                                         &adev->dma_desc_pool,
1156                                         GFP_KERNEL)) == NULL) {
1157                 ret = -ENOMEM;
1158                 goto err_free_adev;
1159         }
1160
1161         dev_dbg(&pdev->dev, "%s: allocated descriptor pool virt %p phys %p\n",
1162                 __FUNCTION__, adev->dma_desc_pool_virt,
1163                 (void *) adev->dma_desc_pool);
1164
1165         adev->id = plat_data->hw_id;
1166
1167         /* discover transaction capabilities from the platform data */
1168         dma_dev->cap_mask = plat_data->cap_mask;
1169
1170         adev->pdev = pdev;
1171         platform_set_drvdata(pdev, adev);
1172
1173         INIT_LIST_HEAD(&dma_dev->channels);
1174
1175         /* set base routines */
1176         dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources;
1177         dma_dev->device_free_chan_resources = iop_adma_free_chan_resources;
1178         dma_dev->device_is_tx_complete = iop_adma_is_complete;
1179         dma_dev->device_issue_pending = iop_adma_issue_pending;
1180         dma_dev->device_dependency_added = iop_adma_dependency_added;
1181         dma_dev->dev = &pdev->dev;
1182
1183         /* set prep routines based on capability */
1184         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1185                 dma_dev->device_prep_dma_memcpy = iop_adma_prep_dma_memcpy;
1186         if (dma_has_cap(DMA_MEMSET, dma_dev->cap_mask))
1187                 dma_dev->device_prep_dma_memset = iop_adma_prep_dma_memset;
1188         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1189                 dma_dev->max_xor = iop_adma_get_max_xor();
1190                 dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
1191         }
1192         if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask))
1193                 dma_dev->device_prep_dma_zero_sum =
1194                         iop_adma_prep_dma_zero_sum;
1195         if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1196                 dma_dev->device_prep_dma_interrupt =
1197                         iop_adma_prep_dma_interrupt;
1198
1199         iop_chan = kzalloc(sizeof(*iop_chan), GFP_KERNEL);
1200         if (!iop_chan) {
1201                 ret = -ENOMEM;
1202                 goto err_free_dma;
1203         }
1204         iop_chan->device = adev;
1205
1206         iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
1207                                         res->end - res->start);
1208         if (!iop_chan->mmr_base) {
1209                 ret = -ENOMEM;
1210                 goto err_free_iop_chan;
1211         }
1212         tasklet_init(&iop_chan->irq_tasklet, iop_adma_tasklet, (unsigned long)
1213                 iop_chan);
1214
1215         /* clear errors before enabling interrupts */
1216         iop_adma_device_clear_err_status(iop_chan);
1217
1218         for (i = 0; i < 3; i++) {
1219                 irq_handler_t handler[] = { iop_adma_eot_handler,
1220                                         iop_adma_eoc_handler,
1221                                         iop_adma_err_handler };
1222                 int irq = platform_get_irq(pdev, i);
1223                 if (irq < 0) {
1224                         ret = -ENXIO;
1225                         goto err_free_iop_chan;
1226                 } else {
1227                         ret = devm_request_irq(&pdev->dev, irq,
1228                                         handler[i], 0, pdev->name, iop_chan);
1229                         if (ret)
1230                                 goto err_free_iop_chan;
1231                 }
1232         }
1233
1234         spin_lock_init(&iop_chan->lock);
1235         init_timer(&iop_chan->cleanup_watchdog);
1236         iop_chan->cleanup_watchdog.data = (unsigned long) iop_chan;
1237         iop_chan->cleanup_watchdog.function = iop_adma_tasklet;
1238         INIT_LIST_HEAD(&iop_chan->chain);
1239         INIT_LIST_HEAD(&iop_chan->all_slots);
1240         INIT_RCU_HEAD(&iop_chan->common.rcu);
1241         iop_chan->common.device = dma_dev;
1242         list_add_tail(&iop_chan->common.device_node, &dma_dev->channels);
1243
1244         if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
1245                 ret = iop_adma_memcpy_self_test(adev);
1246                 dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
1247                 if (ret)
1248                         goto err_free_iop_chan;
1249         }
1250
1251         if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
1252                 dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
1253                 ret = iop_adma_xor_zero_sum_self_test(adev);
1254                 dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
1255                 if (ret)
1256                         goto err_free_iop_chan;
1257         }
1258
1259         dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: "
1260           "( %s%s%s%s%s%s%s%s%s%s)\n",
1261           dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "",
1262           dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "",
1263           dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "",
1264           dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1265           dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "",
1266           dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "",
1267           dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)  ? "fill " : "",
1268           dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "",
1269           dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
1270           dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");
1271
1272         dma_async_device_register(dma_dev);
1273         goto out;
1274
1275  err_free_iop_chan:
1276         kfree(iop_chan);
1277  err_free_dma:
1278         dma_free_coherent(&adev->pdev->dev, plat_data->pool_size,
1279                         adev->dma_desc_pool_virt, adev->dma_desc_pool);
1280  err_free_adev:
1281         kfree(adev);
1282  out:
1283         return ret;
1284 }
1285
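/*
 * Prime an idle channel with a zero-length memcpy descriptor so that the
 * hardware chain has a valid tail for later submissions to link onto.
 * iop_chan_start_null_xor() below does the same with a null xor for
 * engines without a memcpy capability.
 */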
1286 static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan)
1287 {
1288         struct iop_adma_desc_slot *sw_desc, *grp_start;
1289         dma_cookie_t cookie;
1290         int slot_cnt, slots_per_op;
1291
1292         dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
1293
1294         spin_lock_bh(&iop_chan->lock);
1295         slot_cnt = iop_chan_memcpy_slot_count(0, &slots_per_op);
1296         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1297         if (sw_desc) {
1298                 grp_start = sw_desc->group_head;
1299
1300                 list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
1301                 sw_desc->async_tx.ack = 1;
1302                 iop_desc_init_memcpy(grp_start, 0);
1303                 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1304                 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1305                 iop_desc_set_memcpy_src_addr(grp_start, 0);
1306
1307                 cookie = iop_chan->common.cookie;
1308                 cookie++;
1309                 if (cookie <= 1)
1310                         cookie = 2;
1311
1312                 /* initialize the completed cookie to be less than
1313                  * the most recently used cookie
1314                  */
1315                 iop_chan->completed_cookie = cookie - 1;
1316                 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1317
1318                 /* channel should not be busy */
1319                 BUG_ON(iop_chan_is_busy(iop_chan));
1320
1321                 /* clear any prior error-status bits */
1322                 iop_adma_device_clear_err_status(iop_chan);
1323
1324                 /* disable operation */
1325                 iop_chan_disable(iop_chan);
1326
1327                 /* set the descriptor address */
1328                 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1329
1330                 /* 1/ don't add pre-chained descriptors
1331                  * 2/ dummy read to flush next_desc write
1332                  */
1333                 BUG_ON(iop_desc_get_next_desc(sw_desc));
1334
1335                 /* run the descriptor */
1336                 iop_chan_enable(iop_chan);
1337         } else
1338                 dev_printk(KERN_ERR, iop_chan->device->common.dev,
1339                          "failed to allocate null descriptor\n");
1340         spin_unlock_bh(&iop_chan->lock);
1341 }
1342
1343 static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
1344 {
1345         struct iop_adma_desc_slot *sw_desc, *grp_start;
1346         dma_cookie_t cookie;
1347         int slot_cnt, slots_per_op;
1348
1349         dev_dbg(iop_chan->device->common.dev, "%s\n", __FUNCTION__);
1350
1351         spin_lock_bh(&iop_chan->lock);
1352         slot_cnt = iop_chan_xor_slot_count(0, 2, &slots_per_op);
1353         sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op);
1354         if (sw_desc) {
1355                 grp_start = sw_desc->group_head;
1356                 list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain);
1357                 sw_desc->async_tx.ack = 1;
1358                 iop_desc_init_null_xor(grp_start, 2, 0);
1359                 iop_desc_set_byte_count(grp_start, iop_chan, 0);
1360                 iop_desc_set_dest_addr(grp_start, iop_chan, 0);
1361                 iop_desc_set_xor_src_addr(grp_start, 0, 0);
1362                 iop_desc_set_xor_src_addr(grp_start, 1, 0);
1363
1364                 cookie = iop_chan->common.cookie;
1365                 cookie++;
1366                 if (cookie <= 1)
1367                         cookie = 2;
1368
1369                 /* initialize the completed cookie to be less than
1370                  * the most recently used cookie
1371                  */
1372                 iop_chan->completed_cookie = cookie - 1;
1373                 iop_chan->common.cookie = sw_desc->async_tx.cookie = cookie;
1374
1375                 /* channel should not be busy */
1376                 BUG_ON(iop_chan_is_busy(iop_chan));
1377
1378                 /* clear any prior error-status bits */
1379                 iop_adma_device_clear_err_status(iop_chan);
1380
1381                 /* disable operation */
1382                 iop_chan_disable(iop_chan);
1383
1384                 /* set the descriptor address */
1385                 iop_chan_set_next_descriptor(iop_chan, sw_desc->async_tx.phys);
1386
1387                 /* 1/ don't add pre-chained descriptors
1388                  * 2/ dummy read to flush next_desc write
1389                  */
1390                 BUG_ON(iop_desc_get_next_desc(sw_desc));
1391
1392                 /* run the descriptor */
1393                 iop_chan_enable(iop_chan);
1394         } else
1395                 dev_printk(KERN_ERR, iop_chan->device->common.dev,
1396                         "failed to allocate null descriptor\n");
1397         spin_unlock_bh(&iop_chan->lock);
1398 }
1399
1400 static struct platform_driver iop_adma_driver = {
1401         .probe          = iop_adma_probe,
1402         .remove         = iop_adma_remove,
1403         .driver         = {
1404                 .owner  = THIS_MODULE,
1405                 .name   = "iop-adma",
1406         },
1407 };
1408
1409 static int __init iop_adma_init(void)
1410 {
1411         return platform_driver_register(&iop_adma_driver);
1412 }
1413
1414 /* it's currently unsafe to unload this module */
1415 #if 0
1416 static void __exit iop_adma_exit(void)
1417 {
1418         platform_driver_unregister(&iop_adma_driver);
1419         return;
1420 }
1421 module_exit(iop_adma_exit);
1422 #endif
1423
1424 module_init(iop_adma_init);
1425
1426 MODULE_AUTHOR("Intel Corporation");
1427 MODULE_DESCRIPTION("IOP ADMA Engine Driver");
1428 MODULE_LICENSE("GPL");