/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2007 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include "ioatdma.h"
#include "ioatdma_registers.h"
#include "ioatdma_hw.h"
#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common)
#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common)
#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node)
#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx)
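
/*
 * Each channel owns a 0x80-byte window of registers immediately after the
 * device-global registers (see ioat_dma_enumerate_channels below), so
 * dividing a channel's offset from the device reg_base by 0x80 recovers
 * the 1-based channel number used in the log messages.
 */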
#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80)
static int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
#define RESET_DELAY  msecs_to_jiffies(100)
#define WATCHDOG_DELAY  round_jiffies(msecs_to_jiffies(2000))
static void ioat_dma_chan_reset_part2(struct work_struct *work);
static void ioat_dma_chan_watchdog(struct work_struct *work);
/* internal functions */
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan);
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan);

static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan);
static inline struct ioat_dma_chan *ioat_lookup_chan_by_index(
						struct ioatdma_device *device,
						int index)
{
	return device->idx[index];
}
/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_dma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_lookup_chan_by_index(instance, bit);
		tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_dma_chan *ioat_chan = data;

	tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

static void ioat_dma_cleanup_tasklet(unsigned long data);
/**
 * ioat_dma_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat_dma_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat_chan;

	device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan) {
			device->common.chancnt = i;
			break;
		}

		ioat_chan->device = device;
		ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1));
		ioat_chan->xfercap = xfercap;
		ioat_chan->desccount = 0;
		INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2);
		if (ioat_chan->device->version != IOAT_VER_1_2) {
			writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE
					| IOAT_DMA_DCA_ANY_CPU,
				ioat_chan->reg_base + IOAT_DCACTRL_OFFSET);
		}
		spin_lock_init(&ioat_chan->cleanup_lock);
		spin_lock_init(&ioat_chan->desc_lock);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		/* This should be made common somewhere in dmaengine.c */
		ioat_chan->common.device = &device->common;
		list_add_tail(&ioat_chan->common.device_node,
			      &device->common.channels);
		device->idx[i] = ioat_chan;
		tasklet_init(&ioat_chan->cleanup_task,
			     ioat_dma_cleanup_tasklet,
			     (unsigned long) ioat_chan);
		tasklet_disable(&ioat_chan->cleanup_task);
	}
	return device->common.chancnt;
}
/**
 * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                 descriptors to hw
 * @chan: DMA channel handle
 */
static inline void __ioat1_dma_memcpy_issue_pending(
						struct ioat_dma_chan *ioat_chan)
{
	ioat_chan->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending > 0) {
		spin_lock_bh(&ioat_chan->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->desc_lock);
	}
}

static inline void __ioat2_dma_memcpy_issue_pending(
						struct ioat_dma_chan *ioat_chan)
{
	ioat_chan->pending = 0;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
}

static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);

	if (ioat_chan->pending > 0) {
		spin_lock_bh(&ioat_chan->desc_lock);
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->desc_lock);
	}
}
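
/*
 * For reference, a minimal sketch of the dmaengine client pattern that the
 * two hooks above serve (mirroring the self-test further down; the variable
 * names here are illustrative only):
 *
 *	tx = dma_dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, 0);
 *	cookie = tx->tx_submit(tx);
 *	dma_dev->device_issue_pending(chan);
 *
 * Descriptors queue up in tx_submit() and are only pushed to hardware when
 * issue_pending() is called or ioat_pending_level is reached.
 */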
/**
 * ioat_dma_chan_reset_part2 - reinit the channel after a reset
 */
static void ioat_dma_chan_reset_part2(struct work_struct *work)
{
	struct ioat_dma_chan *ioat_chan =
		container_of(work, struct ioat_dma_chan, work.work);
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->desc_lock);

	ioat_chan->completion_virt->low = 0;
	ioat_chan->completion_virt->high = 0;
	ioat_chan->pending = 0;

	/*
	 * count the descriptors waiting, and be sure to do it
	 * right for both the CB1 line and the CB2 ring
	 */
	ioat_chan->dmacount = 0;
	if (ioat_chan->used_desc.prev) {
		desc = to_ioat_desc(ioat_chan->used_desc.prev);
		do {
			ioat_chan->dmacount++;
			desc = to_ioat_desc(desc->node.next);
		} while (&desc->node != ioat_chan->used_desc.next);
	}

	/*
	 * write the new starting descriptor address
	 * this puts channel engine into ARMED state
	 */
	desc = to_ioat_desc(ioat_chan->used_desc.prev);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
		break;
	case IOAT_VER_2_0:
		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

		/* tell the engine to go with what's left to be done */
		writew(ioat_chan->dmacount,
		       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);

		break;
	}
	dev_err(&ioat_chan->device->pdev->dev,
		"chan%d reset - %d descs waiting, %d total desc\n",
		chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);

	spin_unlock_bh(&ioat_chan->desc_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}
/**
 * ioat_dma_reset_channel - restart a channel
 * @ioat_chan: IOAT DMA channel handle
 */
static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan)
{
	u32 chansts, chanerr;

	if (!ioat_chan->used_desc.prev)
		return;

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	chansts = (ioat_chan->completion_virt->low
					& IOAT_CHANSTS_DMA_TRANSFER_STATUS);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(ioat_chan), chansts, chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * whack it upside the head with a reset
	 * and wait for things to settle out.
	 * force the pending count to a really big negative
	 * to make sure no one forces an issue_pending
	 * while we're waiting.
	 */

	spin_lock_bh(&ioat_chan->desc_lock);
	ioat_chan->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base
	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* schedule the 2nd half instead of sleeping a long time */
	schedule_delayed_work(&ioat_chan->work, RESET_DELAY);
}
/**
 * ioat_dma_chan_watchdog - watch for stuck channels
 */
static void ioat_dma_chan_watchdog(struct work_struct *work)
{
	struct ioatdma_device *device =
		container_of(work, struct ioatdma_device, work.work);
	struct ioat_dma_chan *ioat_chan;
	int i;

	union {
		u64 full;
		struct {
			u32 low;
			u32 high;
		};
	} completion_hw;
	unsigned long compl_desc_addr_hw;

	for (i = 0; i < device->common.chancnt; i++) {
		ioat_chan = ioat_lookup_chan_by_index(device, i);

		if (ioat_chan->device->version == IOAT_VER_1_2
			/* have we started processing anything yet */
		    && ioat_chan->last_completion
			/* have we completed any since last watchdog cycle? */
		    && (ioat_chan->last_completion ==
				ioat_chan->watchdog_completion)
			/* has TCP stuck on one cookie since last watchdog? */
		    && (ioat_chan->watchdog_tcp_cookie ==
				ioat_chan->watchdog_last_tcp_cookie)
		    && (ioat_chan->watchdog_tcp_cookie !=
				ioat_chan->completed_cookie)
			/* is there something in the chain to be processed? */
			/* CB1 chain always has at least the last one processed */
		    && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next)
		    && ioat_chan->pending == 0) {

			/*
			 * check the CHANSTS register for the completed
			 * descriptor address.
			 * if it is different from the completion writeback,
			 * it is not zero,
			 * and it has changed since the last watchdog
			 *	we can assume that the channel is still
			 *	working correctly and the problem is in
			 *	the completion writeback: update the
			 *	completion writeback with the actual
			 *	CHANSTS value
			 * else
			 *	try resetting the channel
			 */

			completion_hw.low = readl(ioat_chan->reg_base +
				IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version));
			completion_hw.high = readl(ioat_chan->reg_base +
				IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version));
#if (BITS_PER_LONG == 64)
			compl_desc_addr_hw =
				completion_hw.full
				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
			compl_desc_addr_hw =
				completion_hw.low & IOAT_LOW_COMPLETION_MASK;
#endif

			if ((compl_desc_addr_hw != 0)
			   && (compl_desc_addr_hw != ioat_chan->watchdog_completion)
			   && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) {
				ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw;
				ioat_chan->completion_virt->low = completion_hw.low;
				ioat_chan->completion_virt->high = completion_hw.high;
			} else {
				ioat_dma_reset_channel(ioat_chan);
				ioat_chan->watchdog_completion = 0;
				ioat_chan->last_compl_desc_addr_hw = 0;
			}

		/*
		 * for version 2.0 if there are descriptors yet to be processed
		 * and the last completed hasn't changed since the last watchdog
		 *	if they haven't hit the pending level
		 *	    issue the pending to push them through
		 *	else
		 *	    try resetting the channel
		 */
		} else if (ioat_chan->device->version == IOAT_VER_2_0
		    && ioat_chan->used_desc.prev
		    && ioat_chan->last_completion
		    && ioat_chan->last_completion == ioat_chan->watchdog_completion) {

			if (ioat_chan->pending < ioat_pending_level)
				ioat2_dma_memcpy_issue_pending(&ioat_chan->common);
			else {
				ioat_dma_reset_channel(ioat_chan);
				ioat_chan->watchdog_completion = 0;
			}
		} else {
			ioat_chan->last_compl_desc_addr_hw = 0;
			ioat_chan->watchdog_completion
					= ioat_chan->last_completion;
		}

		ioat_chan->watchdog_last_tcp_cookie =
			ioat_chan->watchdog_tcp_cookie;
	}

	schedule_delayed_work(&device->work, WATCHDOG_DELAY);
}
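
/*
 * ioat1_tx_submit - splice the descriptor chain built by prep_memcpy onto
 * the channel's used list, assign the next cookie, and hit the APPEND
 * doorbell once ioat_pending_level descriptors are pending.
 */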
static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *prev, *new;
	struct ioat_dma_descriptor *hw;
	dma_cookie_t cookie;
	LIST_HEAD(new_chain);
	u32 copy;
	size_t len;
	dma_addr_t src, dst;
	unsigned long orig_flags;
	unsigned int desc_count = 0;

	/* src and dest and len are stored in the initial descriptor */
	len = first->len;
	src = first->src;
	dst = first->dst;
	orig_flags = first->async_tx.flags;
	new = first;

	spin_lock_bh(&ioat_chan->desc_lock);
	prev = to_ioat_desc(ioat_chan->used_desc.prev);
	prefetch(prev->hw);
	do {
		copy = min_t(size_t, len, ioat_chan->xfercap);

		async_tx_ack(&new->async_tx);

		hw = new->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;
		hw->next = 0;

		/* chain together the physical address list for the HW */
		wmb();
		prev->hw->next = (u64) new->async_tx.phys;

		len -= copy;
		dst += copy;
		src += copy;

		list_add_tail(&new->node, &new_chain);
		desc_count++;
		prev = new;
	} while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan)));

	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	if (new->async_tx.callback) {
		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
		if (first != new) {
			/* move callback to last desc */
			new->async_tx.callback = first->async_tx.callback;
			new->async_tx.callback_param
					= first->async_tx.callback_param;
			first->async_tx.callback = NULL;
			first->async_tx.callback_param = NULL;
		}
	}

	new->tx_cnt = desc_count;
	new->async_tx.flags = orig_flags; /* client is in control of this ack */

	/* store the original values for use in later cleanup */
	if (new != first) {
		new->src = first->src;
		new->dst = first->dst;
		new->len = first->len;
	}

	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->async_tx.cookie = cookie;

	/* write address into NextDescriptor field of last desc in chain */
	to_ioat_desc(ioat_chan->used_desc.prev)->hw->next =
							first->async_tx.phys;
	__list_splice(&new_chain, ioat_chan->used_desc.prev);

	ioat_chan->dmacount += desc_count;
	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return cookie;
}
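
/*
 * ioat2_tx_submit - version 2 counterpart of ioat1_tx_submit: descriptors
 * are filled in place in the pre-linked ring, and desc_lock has been held
 * since ioat2_dma_prep_memcpy().
 */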
static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan);
	struct ioat_desc_sw *first = tx_to_ioat_desc(tx);
	struct ioat_desc_sw *new;
	struct ioat_dma_descriptor *hw;
	dma_cookie_t cookie;
	u32 copy;
	size_t len;
	dma_addr_t src, dst;
	unsigned long orig_flags;
	unsigned int desc_count = 0;

	/* src and dest and len are stored in the initial descriptor */
	len = first->len;
	src = first->src;
	dst = first->dst;
	orig_flags = first->async_tx.flags;
	new = first;

	/*
	 * ioat_chan->desc_lock is still in force in version 2 path
	 * it gets unlocked at end of this function
	 */
	do {
		copy = min_t(size_t, len, ioat_chan->xfercap);

		async_tx_ack(&new->async_tx);

		hw = new->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		desc_count++;
	} while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan)));

	hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	if (new->async_tx.callback) {
		hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
		if (first != new) {
			/* move callback to last desc */
			new->async_tx.callback = first->async_tx.callback;
			new->async_tx.callback_param
					= first->async_tx.callback_param;
			first->async_tx.callback = NULL;
			first->async_tx.callback_param = NULL;
		}
	}

	new->tx_cnt = desc_count;
	new->async_tx.flags = orig_flags; /* client is in control of this ack */

	/* store the original values for use in later cleanup */
	if (new != first) {
		new->src = first->src;
		new->dst = first->dst;
		new->len = first->len;
	}

	/* cookie incr and addition to used_list must be atomic */
	cookie = ioat_chan->common.cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	ioat_chan->common.cookie = new->async_tx.cookie = cookie;

	ioat_chan->dmacount += desc_count;
	ioat_chan->pending += desc_count;
	if (ioat_chan->pending >= ioat_pending_level)
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	return cookie;
}
/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat_chan: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *ioat_dma_alloc_descriptor(
					struct ioat_dma_chan *ioat_chan,
					gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = to_ioatdma_device(ioat_chan->common.device);
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));
	dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		desc_sw->async_tx.tx_submit = ioat1_tx_submit;
		break;
	case IOAT_VER_2_0:
		desc_sw->async_tx.tx_submit = ioat2_tx_submit;
		break;
	}
	INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);

	desc_sw->hw = desc;
	desc_sw->async_tx.phys = phys;

	return desc_sw;
}
static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "initial descriptors per channel (default: 256)");
/**
 * ioat2_dma_massage_chan_desc - link the descriptors into a circle
 * @ioat_chan: the channel to be massaged
 */
static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc, *_desc;

	/* setup used_desc */
	ioat_chan->used_desc.next = ioat_chan->free_desc.next;
	ioat_chan->used_desc.prev = NULL;

	/* pull free_desc out of the circle so that every node is a hw
	 * descriptor, but leave it pointing to the list
	 */
	ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next;
	ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev;

	/* circle link the hw descriptors */
	desc = to_ioat_desc(ioat_chan->free_desc.next);
	desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
	list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) {
		desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys;
	}
}
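
/*
 * The resulting layout, sketched for a ring of three descriptors (the real
 * ring holds ioat_initial_desc_count entries):
 *
 *	sw:  d0.node -> d1.node -> d2.node -> d0.node -> ...
 *	hw:  d0.hw->next = d1.phys, d1.hw->next = d2.phys,
 *	     d2.hw->next = d0.phys
 *
 * free_desc is unhooked from the software circle but still points into it.
 */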
/**
 * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @chan: the channel to be filled out
 */
static int ioat_dma_alloc_chan_resources(struct dma_chan *chan,
					 struct dma_client *client)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *desc;
	u16 chanctrl;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat_chan->free_desc))
		return ioat_chan->desccount;

	/* Setup register to interrupt and write completion status on error */
	chanctrl = IOAT_CHANCTRL_ERR_INT_EN |
		IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
		IOAT_CHANCTRL_ERR_COMPLETION_EN;
	writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(&ioat_chan->device->pdev->dev,
			"CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL);
		if (!desc) {
			dev_err(&ioat_chan->device->pdev->dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat_chan->desc_lock);
	ioat_chan->desccount = i;
	list_splice(&tmp_list, &ioat_chan->free_desc);
	if (ioat_chan->device->version != IOAT_VER_1_2)
		ioat2_dma_massage_chan_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion_virt =
		pci_pool_alloc(ioat_chan->device->completion_pool,
			       GFP_KERNEL,
			       &ioat_chan->completion_addr);
	memset(ioat_chan->completion_virt, 0,
	       sizeof(*ioat_chan->completion_virt));
	writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) ioat_chan->completion_addr) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	tasklet_enable(&ioat_chan->cleanup_task);
	ioat_dma_start_null_desc(ioat_chan);  /* give chain to dma device */
	return ioat_chan->desccount;
}
/**
 * ioat_dma_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
static void ioat_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device);
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	tasklet_disable(&ioat_chan->cleanup_task);
	ioat_dma_memcpy_cleanup(ioat_chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base
	       + IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat_chan->desc_lock);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->used_desc, node) {
			in_use_descs++;
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->free_desc, node) {
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		break;
	case IOAT_VER_2_0:
		list_for_each_entry_safe(desc, _desc,
					 ioat_chan->free_desc.next, node) {
			list_del(&desc->node);
			pci_pool_free(ioatdma_device->dma_pool, desc->hw,
				      desc->async_tx.phys);
			kfree(desc);
		}
		desc = to_ioat_desc(ioat_chan->free_desc.next);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->async_tx.phys);
		kfree(desc);
		INIT_LIST_HEAD(&ioat_chan->free_desc);
		INIT_LIST_HEAD(&ioat_chan->used_desc);
		break;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      ioat_chan->completion_virt,
		      ioat_chan->completion_addr);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(&ioat_chan->device->pdev->dev,
			"Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	ioat_chan->last_completion = ioat_chan->completion_addr = 0;
	ioat_chan->pending = 0;
	ioat_chan->dmacount = 0;
	ioat_chan->watchdog_completion = 0;
	ioat_chan->last_compl_desc_addr_hw = 0;
	ioat_chan->watchdog_tcp_cookie =
		ioat_chan->watchdog_last_tcp_cookie = 0;
}
/**
 * ioat_dma_get_next_descriptor - return the next available descriptor
 * @ioat_chan: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat_chan->free_desc)) {
		new = to_ioat_desc(ioat_chan->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
		if (!new) {
			dev_err(&ioat_chan->device->pdev->dev,
				"alloc failed\n");
			return NULL;
		}
	}

	prefetch(new->hw);
	return new;
}
static struct ioat_desc_sw *
ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *new;

	/*
	 * used.prev points to where to start processing
	 * used.next points to next free descriptor
	 * if used.prev == NULL, there are none waiting to be processed
	 * if used.next == used.prev.prev, there is only one free descriptor,
	 *      and we need to use it as a noop descriptor before
	 *      linking in a new set of descriptors, since the device
	 *      has probably already read the pointer to it
	 */
	if (ioat_chan->used_desc.prev &&
	    ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) {

		struct ioat_desc_sw *desc;
		struct ioat_desc_sw *noop_desc;
		int i;

		/* set up the noop descriptor */
		noop_desc = to_ioat_desc(ioat_chan->used_desc.next);
		noop_desc->hw->size = 0;
		noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL;
		noop_desc->hw->src_addr = 0;
		noop_desc->hw->dst_addr = 0;

		ioat_chan->used_desc.next = ioat_chan->used_desc.next->next;
		ioat_chan->pending++;
		ioat_chan->dmacount++;

		/* try to get a few more descriptors */
		for (i = 16; i; i--) {
			desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC);
			if (!desc) {
				dev_err(&ioat_chan->device->pdev->dev,
					"alloc failed\n");
				break;
			}
			list_add_tail(&desc->node, ioat_chan->used_desc.next);

			desc->hw->next
				= to_ioat_desc(desc->node.next)->async_tx.phys;
			to_ioat_desc(desc->node.prev)->hw->next
				= desc->async_tx.phys;
			ioat_chan->desccount++;
		}

		ioat_chan->used_desc.next = noop_desc->node.next;
	}
	new = to_ioat_desc(ioat_chan->used_desc.next);
	prefetch(new);
	ioat_chan->used_desc.next = new->node.next;

	if (ioat_chan->used_desc.prev == NULL)
		ioat_chan->used_desc.prev = &new->node;

	prefetch(new->hw);
	return new;
}
static struct ioat_desc_sw *ioat_dma_get_next_descriptor(
						struct ioat_dma_chan *ioat_chan)
{
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		return ioat1_dma_get_next_descriptor(ioat_chan);
	case IOAT_VER_2_0:
		return ioat2_dma_get_next_descriptor(ioat_chan);
	}
	return NULL;
}
static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
						struct dma_chan *chan,
						dma_addr_t dma_dest,
						dma_addr_t dma_src,
						size_t len,
						unsigned long flags)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *new;

	spin_lock_bh(&ioat_chan->desc_lock);
	new = ioat_dma_get_next_descriptor(ioat_chan);
	spin_unlock_bh(&ioat_chan->desc_lock);

	if (new) {
		new->len = len;
		new->dst = dma_dest;
		new->src = dma_src;
		new->async_tx.flags = flags;
		return &new->async_tx;
	} else {
		dev_err(&ioat_chan->device->pdev->dev,
			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
		return NULL;
	}
}

static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
						struct dma_chan *chan,
						dma_addr_t dma_dest,
						dma_addr_t dma_src,
						size_t len,
						unsigned long flags)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	struct ioat_desc_sw *new;

	spin_lock_bh(&ioat_chan->desc_lock);
	new = ioat2_dma_get_next_descriptor(ioat_chan);

	/*
	 * leave ioat_chan->desc_lock set in ioat 2 path
	 * it gets unlocked at end of tx_submit
	 */

	if (new) {
		new->len = len;
		new->dst = dma_dest;
		new->src = dma_src;
		new->async_tx.flags = flags;
		return &new->async_tx;
	} else {
		spin_unlock_bh(&ioat_chan->desc_lock);
		dev_err(&ioat_chan->device->pdev->dev,
			"chan%d - get_next_desc failed: %d descs waiting, %d total desc\n",
			chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount);
		return NULL;
	}
}
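
/*
 * ioat_dma_cleanup_tasklet - bottom half scheduled by the interrupt
 * handlers: reaps finished descriptors, then writes
 * IOAT_CHANCTRL_INT_DISABLE to the CHANCTRL register.
 */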
static void ioat_dma_cleanup_tasklet(unsigned long data)
{
	struct ioat_dma_chan *chan = (void *)data;
	ioat_dma_memcpy_cleanup(chan);
	writew(IOAT_CHANCTRL_INT_DISABLE,
	       chan->reg_base + IOAT_CHANCTRL_OFFSET);
}
static void
ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc)
{
	/*
	 * yes we are unmapping both _page and _single
	 * alloc'd regions with unmap_page. Is this
	 * *really* that bad?
	 */
	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP))
		pci_unmap_page(ioat_chan->device->pdev,
				pci_unmap_addr(desc, dst),
				pci_unmap_len(desc, len),
				PCI_DMA_FROMDEVICE);

	if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP))
		pci_unmap_page(ioat_chan->device->pdev,
				pci_unmap_addr(desc, src),
				pci_unmap_len(desc, len),
				PCI_DMA_TODEVICE);
}
/**
 * ioat_dma_memcpy_cleanup - clean up finished descriptors
 * @ioat_chan: ioat channel to be cleaned up
 */
static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan)
{
	unsigned long phys_complete;
	struct ioat_desc_sw *desc, *_desc;
	dma_cookie_t cookie = 0;
	unsigned long desc_phys;
	struct ioat_desc_sw *latest_desc;

	prefetch(ioat_chan->completion_virt);

	if (!spin_trylock_bh(&ioat_chan->cleanup_lock))
		return;

	/* The completion writeback can happen at any time,
	   so reads by the driver need to be atomic operations
	   The descriptor physical addresses are limited to 32-bits
	   when the CPU can only do a 32-bit mov */

#if (BITS_PER_LONG == 64)
	phys_complete =
		ioat_chan->completion_virt->full
		& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
#else
	phys_complete =
		ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK;
#endif

	if ((ioat_chan->completion_virt->full
		& IOAT_CHANSTS_DMA_TRANSFER_STATUS) ==
				IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) {
		dev_err(&ioat_chan->device->pdev->dev,
			"Channel halted, chanerr = %x\n",
			readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET));

		/* TODO do something to salvage the situation */
	}

	if (phys_complete == ioat_chan->last_completion) {
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		/*
		 * perhaps we're stuck so hard that the watchdog can't go off?
		 * try to catch it after 2 seconds
		 */
		if (time_after(jiffies,
			       ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) {
			ioat_dma_chan_watchdog(&(ioat_chan->device->work.work));
			ioat_chan->last_completion_time = jiffies;
		}
		return;
	}
	ioat_chan->last_completion_time = jiffies;

	cookie = 0;
	if (!spin_trylock_bh(&ioat_chan->desc_lock)) {
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		list_for_each_entry_safe(desc, _desc,
					 &ioat_chan->used_desc, node) {

			/*
			 * Incoming DMA requests may use multiple descriptors,
			 * due to exceeding xfercap, perhaps. If so, only the
			 * last one will have a cookie, and require unmapping.
			 */
			if (desc->async_tx.cookie) {
				cookie = desc->async_tx.cookie;
				ioat_dma_unmap(ioat_chan, desc);
				if (desc->async_tx.callback) {
					desc->async_tx.callback(desc->async_tx.callback_param);
					desc->async_tx.callback = NULL;
				}
			}

			if (desc->async_tx.phys != phys_complete) {
				/*
				 * a completed entry, but not the last, so clean
				 * up if the client is done with the descriptor
				 */
				if (async_tx_test_ack(&desc->async_tx)) {
					list_del(&desc->node);
					list_add_tail(&desc->node,
						      &ioat_chan->free_desc);
				} else
					desc->async_tx.cookie = 0;
			} else {
				/*
				 * last used desc. Do not remove, so we can
				 * append from it, but don't look at it next
				 * time, either
				 */
				desc->async_tx.cookie = 0;

				/* TODO check status bits? */
				break;
			}
		}
		break;
	case IOAT_VER_2_0:
		/* has some other thread already cleaned up? */
		if (ioat_chan->used_desc.prev == NULL)
			break;

		/* work backwards to find latest finished desc */
		desc = to_ioat_desc(ioat_chan->used_desc.next);
		latest_desc = NULL;
		do {
			desc = to_ioat_desc(desc->node.prev);
			desc_phys = (unsigned long)desc->async_tx.phys
				& IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
			if (desc_phys == phys_complete) {
				latest_desc = desc;
				break;
			}
		} while (&desc->node != ioat_chan->used_desc.prev);

		if (latest_desc != NULL) {

			/* work forwards to clear finished descriptors */
			for (desc = to_ioat_desc(ioat_chan->used_desc.prev);
			     &desc->node != latest_desc->node.next &&
			     &desc->node != ioat_chan->used_desc.next;
			     desc = to_ioat_desc(desc->node.next)) {
				if (desc->async_tx.cookie) {
					cookie = desc->async_tx.cookie;
					desc->async_tx.cookie = 0;
					ioat_dma_unmap(ioat_chan, desc);
					if (desc->async_tx.callback) {
						desc->async_tx.callback(desc->async_tx.callback_param);
						desc->async_tx.callback = NULL;
					}
				}
			}

			/* move used.prev up beyond those that are finished */
			if (&desc->node == ioat_chan->used_desc.next)
				ioat_chan->used_desc.prev = NULL;
			else
				ioat_chan->used_desc.prev = &desc->node;
		}
		break;
	}

	spin_unlock_bh(&ioat_chan->desc_lock);

	ioat_chan->last_completion = phys_complete;
	if (cookie != 0)
		ioat_chan->completed_cookie = cookie;

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}
/**
 * ioat_dma_is_complete - poll the status of an IOAT DMA transaction
 * @chan: IOAT DMA channel handle
 * @cookie: DMA transaction identifier
 * @done: if not %NULL, updated with last completed transaction
 * @used: if not %NULL, updated with last used transaction
 */
static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    dma_cookie_t *done,
					    dma_cookie_t *used)
{
	struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;
	ioat_chan->watchdog_tcp_cookie = cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS)
		return ret;

	ioat_dma_memcpy_cleanup(ioat_chan);

	last_used = chan->cookie;
	last_complete = ioat_chan->completed_cookie;

	if (done)
		*done = last_complete;
	if (used)
		*used = last_used;

	return dma_async_is_complete(cookie, last_complete, last_used);
}
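
/*
 * ioat_dma_start_null_desc - prime an idle channel with a NULL descriptor:
 * the chain address is written here (and, on version 1.2, the START command
 * issued), so later submissions only need the append/dmacount doorbells.
 */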
static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan)
{
	struct ioat_desc_sw *desc;

	spin_lock_bh(&ioat_chan->desc_lock);

	desc = ioat_dma_get_next_descriptor(ioat_chan);
	desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL
				| IOAT_DMA_DESCRIPTOR_CTL_INT_GN
				| IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
	desc->hw->size = 0;
	desc->hw->src_addr = 0;
	desc->hw->dst_addr = 0;
	async_tx_ack(&desc->async_tx);
	switch (ioat_chan->device->version) {
	case IOAT_VER_1_2:
		desc->hw->next = 0;
		list_add_tail(&desc->node, &ioat_chan->used_desc);

		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH);

		writeb(IOAT_CHANCMD_START, ioat_chan->reg_base
			+ IOAT_CHANCMD_OFFSET(ioat_chan->device->version));
		break;
	case IOAT_VER_2_0:
		writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
		writel(((u64) desc->async_tx.phys) >> 32,
		       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);

		ioat_chan->dmacount++;
		__ioat2_dma_memcpy_issue_pending(ioat_chan);
		break;
	}
	spin_unlock_bh(&ioat_chan->desc_lock);
}
/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	printk(KERN_ERR "ioatdma: ioat_dma_test_callback(%p)\n",
		dma_async_param);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
static int ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(device->common.channels.next,
				struct dma_chan,
				device_node);
	if (device->common.device_alloc_chan_resources(dma_chan, NULL) < 1) {
		dev_err(&device->pdev->dev,
			"selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE,
				 DMA_TO_DEVICE);
	dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE,
				  DMA_FROM_DEVICE);
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, 0);
	if (!tx) {
		dev_err(&device->pdev->dev,
			"Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = (void *)0x8086;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(&device->pdev->dev,
			"Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	device->common.device_issue_pending(dma_chan);
	msleep(1);

	if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL)
					!= DMA_SUCCESS) {
		dev_err(&device->pdev->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(&device->pdev->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	device->common.device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");
/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int err, i, j, msixcnt;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n",
		ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		ioat_chan = ioat_lookup_chan_by_index(device, i);
		err = request_irq(device->msix_entries[i].vector,
				  ioat_dma_do_interrupt_msix,
				  0, "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				ioat_chan =
					ioat_lookup_chan_by_index(device, j);
				free_irq(device->msix_entries[j].vector,
					 ioat_chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	device->irq_mode = msix_multi_vector;
	goto done;

msix_single_vector:
	device->msix_entries[0].entry = 0;
	err = pci_enable_msix(device->pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt,
			  0, "ioat-msix", device);
	if (err) {
		pci_disable_msix(device->pdev);
		goto msi;
	}
	device->irq_mode = msix_single_vector;
	goto done;

msi:
	err = pci_enable_msi(device->pdev);
	if (err)
		goto intx;

	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  0, "ioat-msi", device);
	if (err) {
		pci_disable_msi(device->pdev);
		goto intx;
	}
	/*
	 * CB 1.2 devices need a bit set in configuration space to enable MSI
	 */
	if (device->version == IOAT_VER_1_2) {
		u32 dmactrl;
		pci_read_config_dword(device->pdev,
				      IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
		pci_write_config_dword(device->pdev,
				       IOAT_PCI_DMACTRL_OFFSET, dmactrl);
	}
	device->irq_mode = msi;
	goto done;

intx:
	err = request_irq(device->pdev->irq, ioat_dma_do_interrupt,
			  IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;
	device->irq_mode = intx;

done:
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(&device->pdev->dev, "no usable interrupts\n");
	device->irq_mode = none;
	return -1;
}
/**
 * ioat_dma_remove_interrupts - remove whatever interrupts were set
 * @device: ioat device
 */
static void ioat_dma_remove_interrupts(struct ioatdma_device *device)
{
	struct ioat_dma_chan *ioat_chan;
	int i;

	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);

	switch (device->irq_mode) {
	case msix_multi_vector:
		for (i = 0; i < device->common.chancnt; i++) {
			ioat_chan = ioat_lookup_chan_by_index(device, i);
			free_irq(device->msix_entries[i].vector, ioat_chan);
		}
		pci_disable_msix(device->pdev);
		break;
	case msix_single_vector:
		free_irq(device->msix_entries[0].vector, device);
		pci_disable_msix(device->pdev);
		break;
	case msi:
		free_irq(device->pdev->irq, device);
		pci_disable_msi(device->pdev);
		break;
	case intx:
		free_irq(device->pdev->irq, device);
		break;
	case none:
		dev_warn(&device->pdev->dev,
			 "call to %s without interrupts setup\n", __func__);
	}
	device->irq_mode = none;
}
struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev,
				      void __iomem *iobase)
{
	int err;
	struct ioatdma_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		err = -ENOMEM;
		goto err_kzalloc;
	}
	device->pdev = pdev;
	device->reg_base = iobase;
	device->version = readb(device->reg_base + IOAT_VER_OFFSET);

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	INIT_LIST_HEAD(&device->common.channels);
	ioat_dma_enumerate_channels(device);

	device->common.device_alloc_chan_resources =
						ioat_dma_alloc_chan_resources;
	device->common.device_free_chan_resources =
						ioat_dma_free_chan_resources;
	device->common.dev = &pdev->dev;

	dma_cap_set(DMA_MEMCPY, device->common.cap_mask);
	device->common.device_is_tx_complete = ioat_dma_is_complete;
	switch (device->version) {
	case IOAT_VER_1_2:
		device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
		device->common.device_issue_pending =
						ioat1_dma_memcpy_issue_pending;
		break;
	case IOAT_VER_2_0:
		device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy;
		device->common.device_issue_pending =
						ioat2_dma_memcpy_issue_pending;
		break;
	}

	dev_err(&device->pdev->dev,
		"Intel(R) I/OAT DMA Engine found,"
		" %d channels, device version 0x%02x, driver version %s\n",
		device->common.chancnt, device->version, IOAT_DMA_VERSION);

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma_self_test(device);
	if (err)
		goto err_self_test;

	ioat_set_tcp_copy_break(device);

	dma_async_device_register(&device->common);

	INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog);
	schedule_delayed_work(&device->work,
			      WATCHDOG_DELAY);

	return device;

err_self_test:
	ioat_dma_remove_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	kfree(device);
err_kzalloc:
	dev_err(&pdev->dev,
		"Intel(R) I/OAT DMA Engine initialization failed\n");
	return NULL;
}
void ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_chan *chan, *_chan;
	struct ioat_dma_chan *ioat_chan;

	ioat_dma_remove_interrupts(device);

	dma_async_device_unregister(&device->common);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	iounmap(device->reg_base);
	pci_release_regions(device->pdev);
	pci_disable_device(device->pdev);

	cancel_delayed_work(&device->work);

	list_for_each_entry_safe(chan, _chan,
				 &device->common.channels, device_node) {
		ioat_chan = to_ioat_chan(chan);
		list_del(&chan->device_node);