/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"
int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");

/* internal functions */
static void ioat1_cleanup(struct ioat_dma_chan *ioat);
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat);

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt number
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioat_chan_common *chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &chan->state))
			tasklet_schedule(&chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}
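
/*
 * In single vector mode the attention status register acts as a bitmap of
 * interrupting channels: bit N set in ATTNSTATUS means channel index N needs
 * service, so the one shared IRQ fans out to the per-channel cleanup
 * tasklets above.
 */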

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt number
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioat_chan_common *chan = data;

	if (test_bit(IOAT_RUN, &chan->state))
		tasklet_schedule(&chan->cleanup_task);

	return IRQ_HANDLED;
}

/* common channel initialization */
void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c = &chan->common;
	unsigned long data = (unsigned long) c;

	chan->device = device;
	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&chan->cleanup_lock);
	chan->common.device = dma;
	list_add_tail(&chan->common.device_node, &dma->channels);
	device->idx[idx] = chan;
	init_timer(&chan->timer);
	chan->timer.function = device->timer_fn;
	chan->timer.data = data;
	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
}
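
/*
 * The 0x80 stride above gives each channel its own register window after the
 * device-global registers: channel 0 lives at reg_base + 0x80, channel 1 at
 * reg_base + 0x100, and so on.
 */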

/**
 * ioat1_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
static int ioat1_enumerate_channels(struct ioatdma_device *device)
{
	u8 xfercap_scale;
	u32 xfercap;
	int i;
	struct ioat_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_scale &= 0x1f; /* bits [4:0] valid */
	xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale));
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap);

#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		dma->chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap = xfercap;
		spin_lock_init(&ioat->desc_lock);
		INIT_LIST_HEAD(&ioat->free_desc);
		INIT_LIST_HEAD(&ioat->used_desc);
	}
	dma->chancnt = i;
	return i;
}
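
/*
 * xfercap is 2^XFERCAP_SCALE bytes, read straight from hardware; a scale of
 * 12, for instance, caps each descriptor at 4096 bytes, and prep_memcpy
 * below splits larger requests into a chain of descriptors accordingly.
 */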

/**
 * __ioat1_dma_memcpy_issue_pending - push potentially unrecognized appended
 *                                    descriptors to hw
 * @ioat: IOAT DMA channel handle
 */
static inline void
__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat)
{
	void __iomem *reg_base = ioat->base.reg_base;

	dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n",
		__func__, ioat->pending);
	ioat->pending = 0;
	writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET);
}

static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(chan);

	if (ioat->pending > 0) {
		spin_lock_bh(&ioat->desc_lock);
		__ioat1_dma_memcpy_issue_pending(ioat);
		spin_unlock_bh(&ioat->desc_lock);
	}
}

/**
 * ioat1_reset_channel - restart a channel
 * @ioat: IOAT DMA channel handle
 */
static void ioat1_reset_channel(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	void __iomem *reg_base = chan->reg_base;
	u32 chansts, chanerr;

	dev_warn(to_dev(chan), "reset\n");
	chanerr = readl(reg_base + IOAT_CHANERR_OFFSET);
	chansts = *chan->completion & IOAT_CHANSTS_STATUS;
	if (chanerr) {
		dev_err(to_dev(chan),
			"chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n",
			chan_num(chan), chansts, chanerr);
		writel(chanerr, reg_base + IOAT_CHANERR_OFFSET);
	}

	/*
	 * Whack it upside the head with a reset and wait for things to
	 * settle out.  Force the pending count to a really big negative
	 * value to make sure no one forces an issue_pending while we're
	 * waiting.
	 */
	ioat->pending = INT_MIN;
	writeb(IOAT_CHANCMD_RESET,
	       reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	set_bit(IOAT_RESET_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + RESET_DELAY);
}

static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc = tx_to_ioat_desc(tx);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *first;
	struct ioat_desc_sw *chain_tail;
	dma_cookie_t cookie;

	spin_lock_bh(&ioat->desc_lock);
	/* cookie incr and addition to used_list must be atomic */
	cookie = c->cookie;
	cookie++;
	if (cookie < 0)
		cookie = 1;
	c->cookie = cookie;
	tx->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	/* write address into NextDescriptor field of last desc in chain */
	first = to_ioat_desc(desc->tx_list.next);
	chain_tail = to_ioat_desc(ioat->used_desc.prev);
	/* make descriptor updates globally visible before chaining */
	wmb();
	chain_tail->hw->next = first->txd.phys;
	list_splice_tail_init(&desc->tx_list, &ioat->used_desc);
	dump_desc_dbg(ioat, chain_tail);
	dump_desc_dbg(ioat, first);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	ioat->active += desc->hw->tx_cnt;
	ioat->pending += desc->hw->tx_cnt;
	if (ioat->pending >= ioat_pending_level)
		__ioat1_dma_memcpy_issue_pending(ioat);
	spin_unlock_bh(&ioat->desc_lock);

	return cookie;
}
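
/*
 * A dmaengine client that submitted through this path can poll for
 * completion with the returned cookie, e.g. via
 * dma_async_is_tx_complete(chan, cookie, NULL, NULL), which lands in
 * ioat_dma_tx_status() below.
 */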

/**
 * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair
 * @ioat: the channel supplying the memory pool for the descriptors
 * @flags: allocation flags
 */
static struct ioat_desc_sw *
ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags)
{
	struct ioat_dma_descriptor *desc;
	struct ioat_desc_sw *desc_sw;
	struct ioatdma_device *ioatdma_device;
	dma_addr_t phys;

	ioatdma_device = ioat->base.device;
	desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys);
	if (unlikely(!desc))
		return NULL;

	desc_sw = kzalloc(sizeof(*desc_sw), flags);
	if (unlikely(!desc_sw)) {
		pci_pool_free(ioatdma_device->dma_pool, desc, phys);
		return NULL;
	}

	memset(desc, 0, sizeof(*desc));

	INIT_LIST_HEAD(&desc_sw->tx_list);
	dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common);
	desc_sw->txd.tx_submit = ioat1_tx_submit;
	desc_sw->hw = desc;
	desc_sw->txd.phys = phys;
	set_desc_id(desc_sw, -1);

	return desc_sw;
}

static int ioat_initial_desc_count = 256;
module_param(ioat_initial_desc_count, int, 0644);
MODULE_PARM_DESC(ioat_initial_desc_count,
		 "ioat1: initial descriptors per channel (default: 256)");

/**
 * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors
 * @c: the channel to be filled out
 */
static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	u32 chanerr;
	int i;
	LIST_HEAD(tmp_list);

	/* have we already been set up? */
	if (!list_empty(&ioat->free_desc))
		return ioat->desccount;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	if (chanerr) {
		dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
		writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);
	}

	/* Allocate descriptors */
	for (i = 0; i < ioat_initial_desc_count; i++) {
		desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL);
		if (!desc) {
			dev_err(to_dev(chan), "Only %d initial descriptors\n", i);
			break;
		}
		set_desc_id(desc, i);
		list_add_tail(&desc->node, &tmp_list);
	}
	spin_lock_bh(&ioat->desc_lock);
	ioat->desccount = i;
	list_splice(&tmp_list, &ioat->free_desc);
	spin_unlock_bh(&ioat->desc_lock);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	set_bit(IOAT_RUN, &chan->state);
	ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, ioat->desccount);
	return ioat->desccount;
}
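
/*
 * The completion area is a single u64 the hardware updates with the physical
 * address of the last descriptor it retired, with status flags in the low
 * bits (see ioat_chansts_to_addr()/IOAT_CHANSTS_STATUS); cleanup reads this
 * cacheable location instead of polling channel registers.  Its base address
 * is programmed as two 32-bit writes because, per the comment above, a
 * single 64-bit MMIO write does not work here.
 */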

void ioat_stop(struct ioat_chan_common *chan)
{
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	int chan_id = chan_num(chan);

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &chan->state);

	/* flush inflight interrupts */
#ifdef CONFIG_PCI_MSI
	if (pdev->msix_enabled) {
		struct msix_entry *msix = &device->msix_entries[chan_id];

		synchronize_irq(msix->vector);
	} else
#endif
		synchronize_irq(pdev->irq);

	/* flush inflight timers */
	del_timer_sync(&chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	device->cleanup_fn((unsigned long) &chan->common);
}
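
/*
 * The ordering above is what makes the teardown safe: once IOAT_RUN is
 * clear, both interrupt handlers refuse to schedule the tasklet, so after
 * synchronize_irq()/del_timer_sync()/tasklet_kill() nothing can re-arm and
 * the final synchronous cleanup call sees a quiesced channel.
 */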

/**
 * ioat1_dma_free_chan_resources - release all the descriptors
 * @c: the channel to be cleaned
 */
static void ioat1_dma_free_chan_resources(struct dma_chan *c)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *ioatdma_device = chan->device;
	struct ioat_desc_sw *desc, *_desc;
	int in_use_descs = 0;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (ioat->desccount == 0)
		return;

	ioat_stop(chan);

	/* Delay 100ms after reset to allow internal DMA logic to quiesce
	 * before removing DMA descriptor resources.
	 */
	writeb(IOAT_CHANCMD_RESET,
	       chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
	mdelay(100);

	spin_lock_bh(&ioat->desc_lock);
	list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) {
		dev_dbg(to_dev(chan), "%s: freeing %d from used list\n",
			__func__, desc_id(desc));
		dump_desc_dbg(ioat, desc);
		in_use_descs++;
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	list_for_each_entry_safe(desc, _desc,
				 &ioat->free_desc, node) {
		list_del(&desc->node);
		pci_pool_free(ioatdma_device->dma_pool, desc->hw,
			      desc->txd.phys);
		kfree(desc);
	}
	spin_unlock_bh(&ioat->desc_lock);

	pci_pool_free(ioatdma_device->completion_pool,
		      chan->completion,
		      chan->completion_dma);

	/* one is ok since we left it on there on purpose */
	if (in_use_descs > 1)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			in_use_descs - 1);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->pending = 0;
	ioat->desccount = 0;
}

/**
 * ioat1_dma_get_next_descriptor - return the next available descriptor
 * @ioat: IOAT DMA channel handle
 *
 * Gets the next descriptor from the chain, and must be called with the
 * channel's desc_lock held.  Allocates more descriptors if the channel
 * has run out.
 */
static struct ioat_desc_sw *
ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat)
{
	struct ioat_desc_sw *new;

	if (!list_empty(&ioat->free_desc)) {
		new = to_ioat_desc(ioat->free_desc.next);
		list_del(&new->node);
	} else {
		/* try to get another desc */
		new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC);
		if (!new) {
			dev_err(to_dev(&ioat->base), "alloc failed\n");
			return NULL;
		}
	}
	dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n",
		__func__, desc_id(new));
	prefetch(new->hw);
	return new;
}
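
/*
 * GFP_ATOMIC is the only safe choice in the refill path above: the caller
 * holds desc_lock and may be running in bottom-half context, so sleeping
 * allocations are not allowed.
 */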

static struct dma_async_tx_descriptor *
ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		      dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);
	struct ioat_desc_sw *desc;
	size_t copy;
	LIST_HEAD(chain);
	dma_addr_t src = dma_src;
	dma_addr_t dest = dma_dest;
	size_t total_len = len;
	struct ioat_dma_descriptor *hw = NULL;
	int tx_cnt = 0;

	spin_lock_bh(&ioat->desc_lock);
	desc = ioat1_dma_get_next_descriptor(ioat);
	do {
		if (!desc)
			break;

		tx_cnt++;
		copy = min_t(size_t, len, ioat->xfercap);

		hw = desc->hw;
		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dest;

		list_add_tail(&desc->node, &chain);

		len -= copy;
		dest += copy;
		src += copy;
		if (len) {
			struct ioat_desc_sw *next;

			async_tx_ack(&desc->txd);
			next = ioat1_dma_get_next_descriptor(ioat);
			hw->next = next ? next->txd.phys : 0;
			dump_desc_dbg(ioat, desc);
			desc = next;
		}
	} while (len);

	if (!desc) {
		struct ioat_chan_common *chan = &ioat->base;

		dev_err(to_dev(chan),
			"chan%d - get_next_desc failed\n", chan_num(chan));
		list_splice(&chain, &ioat->free_desc);
		spin_unlock_bh(&ioat->desc_lock);
		return NULL;
	}
	spin_unlock_bh(&ioat->desc_lock);

	desc->txd.flags = flags;
	desc->len = total_len;
	list_splice(&chain, &desc->tx_list);
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->tx_cnt = tx_cnt;
	dump_desc_dbg(ioat, desc);

	return &desc->txd;
}
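
/*
 * Worked example: with a 4096-byte xfercap, a 10000-byte request becomes a
 * three-descriptor chain (4096 + 4096 + 1808 bytes).  Only the last
 * descriptor gets int_en/compl_write set and carries the cookie; the
 * intermediate ones are pre-acked above since clients never see them.
 */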

static void ioat1_cleanup_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	ioat1_cleanup(ioat);
	if (!test_bit(IOAT_RUN, &chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
		    size_t len, struct ioat_dma_descriptor *hw)
{
	struct pci_dev *pdev = chan->device->pdev;
	size_t offset = len - hw->size;

	if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
		ioat_unmap(pdev, hw->dst_addr - offset, len,
			   PCI_DMA_FROMDEVICE, flags, 1);

	if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP))
		ioat_unmap(pdev, hw->src_addr - offset, len,
			   PCI_DMA_TODEVICE, flags, 0);
}

dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
{
	dma_addr_t phys_complete;
	u64 completion;

	completion = *chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_err(to_dev(chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
			   dma_addr_t *phys_complete)
{
	*phys_complete = ioat_get_current_completion(chan);
	if (*phys_complete == chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct list_head *_desc, *n;
	struct dma_async_tx_descriptor *tx;

	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
		__func__, (unsigned long long) phys_complete);
	list_for_each_safe(_desc, n, &ioat->used_desc) {
		struct ioat_desc_sw *desc;

		prefetch(n);
		desc = list_entry(_desc, typeof(*desc), node);
		tx = &desc->txd;
		/*
		 * Incoming DMA requests may use multiple descriptors,
		 * due to exceeding xfercap, perhaps. If so, only the
		 * last one will have a cookie, and require unmapping.
		 */
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			ioat->active -= desc->hw->tx_cnt;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys != phys_complete) {
			/*
			 * a completed entry, but not the last, so clean
			 * up if the client is done with the descriptor
			 */
			if (async_tx_test_ack(tx))
				list_move_tail(&desc->node, &ioat->free_desc);
		} else {
			/*
			 * last used desc. Do not remove, so we can
			 * append from it.
			 */

			/* if nothing else is pending, cancel the
			 * completion timeout
			 */
			if (n == &ioat->used_desc) {
				dev_dbg(to_dev(chan),
					"%s cancel completion timeout\n",
					__func__);
				clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
			}

			/* TODO check status bits? */
			break;
		}
	}

	chan->last_completion = phys_complete;
}

/**
 * ioat1_cleanup - cleanup up finished descriptors
 * @ioat: ioat channel to be cleaned up
 *
 * To prevent lock contention we defer cleanup when the locks are
 * contended with a terminal timeout that forces cleanup and catches
 * completion notification errors.
 */
static void ioat1_cleanup(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->desc_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);

	spin_unlock_bh(&ioat->desc_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}
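
/*
 * Note the trylock-only strategy: if either lock is contended the cleanup
 * tasklet simply backs off, relying on the COMPLETION_TIMEOUT timer armed at
 * submit time to force a cleanup pass later rather than spinning here.
 */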

static void ioat1_timer_event(unsigned long data)
{
	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);

	spin_lock_bh(&chan->cleanup_lock);
	if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) {
		struct ioat_desc_sw *desc;

		spin_lock_bh(&ioat->desc_lock);

		/* restart active descriptors */
		desc = to_ioat_desc(ioat->used_desc.prev);
		ioat_set_chainaddr(ioat, desc->txd.phys);
		ioat_start(chan);

		ioat->pending = 0;
		set_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		spin_unlock_bh(&ioat->desc_lock);
	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		dma_addr_t phys_complete;

		spin_lock_bh(&ioat->desc_lock);
		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat1_reset_channel(ioat);
		else {
			u64 status = ioat_chansts(chan);

			/* manually update the last completion address */
			if (ioat_chansts_to_addr(status) != 0)
				*chan->completion = status;

			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->desc_lock);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

enum dma_status
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct ioat_chan_common *chan = to_chan_common(c);
	struct ioatdma_device *device = chan->device;

	if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
		return DMA_SUCCESS;

	device->cleanup_fn((unsigned long) c);

	return ioat_tx_status(c, cookie, txstate);
}

static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_desc_sw *desc;
	struct ioat_dma_descriptor *hw;

	spin_lock_bh(&ioat->desc_lock);

	desc = ioat1_dma_get_next_descriptor(ioat);
	if (!desc) {
		dev_err(to_dev(chan),
			"Unable to start null desc - get next desc failed\n");
		spin_unlock_bh(&ioat->desc_lock);
		return;
	}

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	hw->next = 0;
	list_add_tail(&desc->node, &ioat->used_desc);
	dump_desc_dbg(ioat, desc);

	ioat_set_chainaddr(ioat, desc->txd.phys);
	ioat_start(chan);
	spin_unlock_bh(&ioat->desc_lock);
}
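
/*
 * The "null" descriptor written above performs no data movement (ctl_f.null
 * set, with a token non-zero size to keep the channel from erroring); it
 * exists purely to seed the hardware chain so that later submissions always
 * have a valid tail descriptor to append to.
 */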

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void __devinit ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @device: device to be tested
 */
int __devinit ioat_dma_self_test(struct ioatdma_device *device)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &device->common;
	struct device *dev = &device->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE |
		DMA_PREP_INTERRUPT;
	tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
						   IOAT_TEST_SIZE, flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), "
		 "msix-single-vector, msi, intx");

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @device: ioat device
 */
static int ioat_dma_setup_interrupts(struct ioatdma_device *device)
{
	struct ioat_chan_common *chan;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msix-single-vector"))
		goto msix_single_vector;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = device->common.chancnt;
	for (i = 0; i < msixcnt; i++)
		device->msix_entries[i].entry = i;

	err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
	if (err < 0)
		goto msi;
	if (err > 0)
		goto msix_single_vector;

	for (i = 0; i < msixcnt; i++) {
		msix = &device->msix_entries[i];
		chan = ioat_chan_by_index(device, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &device->msix_entries[j];
				chan = ioat_chan_by_index(device, j);
				devm_free_irq(dev, msix->vector, chan);
			}
			goto msix_single_vector;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	goto done;

msix_single_vector:
	msix = &device->msix_entries[0];
	msix->entry = 0;
	err = pci_enable_msix(pdev, device->msix_entries, 1);
	if (err)
		goto msi;

	err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0,
			       "ioat-msix", device);
	if (err) {
		pci_disable_msix(pdev);
		goto msi;
	}
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", device);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", device);
	if (err)
		goto err_no_irq;

done:
	if (device->intr_quirk)
		device->intr_quirk(device);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
	dev_err(dev, "no usable interrupts\n");
	return err;
}

static void ioat_disable_interrupts(struct ioatdma_device *device)
{
	/* Disable all interrupt generation */
	writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET);
}

int __devinit ioat_probe(struct ioatdma_device *device)
{
	int err = -ENODEV;
	struct dma_device *dma = &device->common;
	struct pci_dev *pdev = device->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	device->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					   sizeof(struct ioat_dma_descriptor),
					   64, 0);
	if (!device->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	device->completion_pool = pci_pool_create("completion_pool", pdev,
						  sizeof(u64), SMP_CACHE_BYTES,
						  SMP_CACHE_BYTES);
	if (!device->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	device->enumerate_channels(device);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(device);
	if (err)
		goto err_setup_interrupts;

	err = device->self_test(device);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(device);
err_setup_interrupts:
	pci_pool_destroy(device->completion_pool);
err_completion_pool:
	pci_pool_destroy(device->dma_pool);
err_dma_pool:
	return err;
}

int __devinit ioat_register(struct ioatdma_device *device)
{
	int err = dma_async_device_register(&device->common);

	if (err) {
		ioat_disable_interrupts(device);
		pci_pool_destroy(device->completion_pool);
		pci_pool_destroy(device->dma_pool);
	}

	return err;
}

/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */
static void ioat1_intr_quirk(struct ioatdma_device *device)
{
	struct pci_dev *pdev = device->pdev;
	u32 dmactrl;

	pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl);
	if (pdev->msi_enabled)
		dmactrl |= IOAT_PCI_DMACTRL_MSI_EN;
	else
		dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN;
	pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl);
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->desccount);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat_dma_chan *ioat = to_ioat_chan(c);

	return sprintf(page, "%d\n", ioat->active);
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *device = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       device->version >> 4, device->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static struct attribute *ioat1_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioat_chan_common *chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	chan = container_of(kobj, struct ioat_chan_common, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&chan->common, page);
}

const struct sysfs_ops ioat_sysfs_ops = {
	.show	= ioat_attr_show,
};

static struct kobj_type ioat1_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat1_attrs,
};

void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata");
		if (err) {
			dev_warn(to_dev(chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state);
		}
	}
}
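
/*
 * Each channel thus gains a "quickdata" directory under its dma class
 * device; with the attributes registered above, something like
 * /sys/class/dma/dma0chan0/quickdata/ring_size exposes desccount (the exact
 * path depends on how the dma class names the channel device).
 */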

void ioat_kobject_del(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioat_chan_common *chan = to_chan_common(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) {
			kobject_del(&chan->kobj);
			kobject_put(&chan->kobj);
		}
	}
}

int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	int err;

	device->intr_quirk = ioat1_intr_quirk;
	device->enumerate_channels = ioat1_enumerate_channels;
	device->self_test = ioat_dma_self_test;
	device->timer_fn = ioat1_timer_event;
	device->cleanup_fn = ioat1_cleanup_event;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
	dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
	dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
	dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
	dma->device_tx_status = ioat_dma_tx_status;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(4096);
	err = ioat_register(device);
	if (err)
		return err;
	ioat_kobject_add(device, &ioat1_ktype);

	if (dca)
		device->dca = ioat_dca_init(pdev, device->reg_base);

	return err;
}

void __devexit ioat_dma_remove(struct ioatdma_device *device)
{
	struct dma_device *dma = &device->common;

	ioat_disable_interrupts(device);

	ioat_kobject_del(device);

	dma_async_device_unregister(dma);

	pci_pool_destroy(device->dma_pool);
	pci_pool_destroy(device->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}