/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"
int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
                 "ioat2+: allocate 2^n descriptors per channel (default: n=8)");

static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
                 "ioat2+: upper limit for dynamic ring resizing (default: n=16)");
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
        void __iomem *reg_base = ioat->base.reg_base;

        ioat->dmacount += ioat2_ring_pending(ioat);
        ioat->issued = ioat->head;
        /* make descriptor updates globally visible before notifying channel */
        wmb();
        writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
        dev_dbg(to_dev(&ioat->base),
                "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
                __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
void ioat2_issue_pending(struct dma_chan *chan)
        struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);

        spin_lock_bh(&ioat->ring_lock);
        if (ioat->pending == 1)
                __ioat2_issue_pending(ioat);
        spin_unlock_bh(&ioat->ring_lock);
/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * set pending to '1' unless pending is already set to '2', pending == 2
 * indicates that submission is temporarily blocked due to an in-flight
 * reset. If we are already above the ioat_pending_level threshold then
 * just issue pending.
 *
 * called with ring_lock held
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
        if (unlikely(ioat->pending == 2))
                return;
        else if (ioat2_ring_pending(ioat) > ioat_pending_level)
                __ioat2_issue_pending(ioat);
        else
                ioat->pending = 1;
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
        struct ioat_ring_ent *desc;
        struct ioat_dma_descriptor *hw;

        if (ioat2_ring_space(ioat) < 1) {
                dev_err(to_dev(&ioat->base),
                        "Unable to start null desc - ring full\n");
                return;
        }

        dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
                __func__, ioat->head, ioat->tail, ioat->issued);
        idx = ioat2_desc_alloc(ioat, 1);
        desc = ioat2_get_ring_ent(ioat, idx);

        hw->ctl_f.int_en = 1;
        hw->ctl_f.compl_write = 1;
        /* set size to non-zero value (channel returns error when size is 0) */
        hw->size = NULL_DESC_BUFFER_SIZE;

        async_tx_ack(&desc->txd);
        ioat2_set_chainaddr(ioat, desc->txd.phys);
        dump_desc_dbg(ioat, desc);
        __ioat2_issue_pending(ioat);
static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
        spin_lock_bh(&ioat->ring_lock);
        __ioat2_start_null_desc(ioat);
        spin_unlock_bh(&ioat->ring_lock);
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
        struct ioat_chan_common *chan = &ioat->base;
        struct dma_async_tx_descriptor *tx;
        struct ioat_ring_ent *desc;
        bool seen_current = false;

        dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
                __func__, ioat->head, ioat->tail, ioat->issued);

        active = ioat2_ring_active(ioat);
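        /* walk the ring from the tail toward the head, retiring descriptors
         * until we pass the one whose completion address the hardware last
         * reported
         */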
        for (i = 0; i < active && !seen_current; i++) {
                prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
                desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
                tx = &desc->txd;
                dump_desc_dbg(ioat, desc);
                ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
                chan->completed_cookie = tx->cookie;
                tx->callback(tx->callback_param);
                if (tx->phys == phys_complete)
                        seen_current = true;
        }
        BUG_ON(!seen_current); /* no active descs have written a completion? */

        chan->last_completion = phys_complete;
        if (ioat->head == ioat->tail) {
                dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
                        __func__);
                clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
                mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @chan: ioat channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
        struct ioat_chan_common *chan = &ioat->base;
        unsigned long phys_complete;

        prefetch(chan->completion);

        if (!spin_trylock_bh(&chan->cleanup_lock))
                return;

        if (!ioat_cleanup_preamble(chan, &phys_complete)) {
                spin_unlock_bh(&chan->cleanup_lock);
                return;
        }

        if (!spin_trylock_bh(&ioat->ring_lock)) {
                spin_unlock_bh(&chan->cleanup_lock);
                return;
        }

        __cleanup(ioat, phys_complete);

        spin_unlock_bh(&ioat->ring_lock);
        spin_unlock_bh(&chan->cleanup_lock);
void ioat2_cleanup_tasklet(unsigned long data)
        struct ioat2_dma_chan *ioat = (void *) data;

        ioat2_cleanup(ioat);
        writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
        struct ioat_chan_common *chan = &ioat->base;

        /* set the tail to be re-issued */
        ioat->issued = ioat->tail;
        set_bit(IOAT_COMPLETION_PENDING, &chan->state);
        mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

        dev_dbg(to_dev(chan),
                "%s: head: %#x tail: %#x issued: %#x count: %#x\n",
                __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
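        /* if descriptors were produced but not yet submitted, point the
         * hardware back at the tail and reissue them; otherwise kick the
         * channel with a null descriptor
         */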
        if (ioat2_ring_pending(ioat)) {
                struct ioat_ring_ent *desc;

                desc = ioat2_get_ring_ent(ioat, ioat->tail);
                ioat2_set_chainaddr(ioat, desc->txd.phys);
                __ioat2_issue_pending(ioat);
        } else
                __ioat2_start_null_desc(ioat);
static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
        struct ioat_chan_common *chan = &ioat->base;
        unsigned long phys_complete;

        status = ioat_chansts(chan);
        if (is_ioat_active(status) || is_ioat_idle(status))

        while (is_ioat_active(status) || is_ioat_idle(status)) {
                status = ioat_chansts(chan);

        if (ioat_cleanup_preamble(chan, &phys_complete))
                __cleanup(ioat, phys_complete);

        __ioat2_restart_chan(ioat);
void ioat2_timer_event(unsigned long data)
        struct ioat2_dma_chan *ioat = (void *) data;
        struct ioat_chan_common *chan = &ioat->base;

        spin_lock_bh(&chan->cleanup_lock);
        if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
                unsigned long phys_complete;

                spin_lock_bh(&ioat->ring_lock);
                status = ioat_chansts(chan);

                /* when halted due to errors check for channel
                 * programming errors before advancing the completion state
                 */
                if (is_ioat_halted(status)) {
                        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
                        BUG_ON(is_ioat_bug(chanerr));
                }

                /* if we haven't made progress and we have already
                 * acknowledged a pending completion once, then be more
                 * forceful with a restart
                 */
                if (ioat_cleanup_preamble(chan, &phys_complete))
                        __cleanup(ioat, phys_complete);
                else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
                        ioat2_restart_channel(ioat);

                set_bit(IOAT_COMPLETION_ACK, &chan->state);
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

                spin_unlock_bh(&ioat->ring_lock);
        /* if the ring is idle, empty, and oversized try to step
         * down the size
         */
        spin_lock_bh(&ioat->ring_lock);
        active = ioat2_ring_active(ioat);
        if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
                reshape_ring(ioat, ioat->alloc_order-1);
        spin_unlock_bh(&ioat->ring_lock);

        /* keep shrinking until we get back to our minimum
         * default size
         */
        if (ioat->alloc_order > ioat_get_alloc_order())
                mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
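        /* example: a ring grown to order 10 under load steps back down
         * 10 -> 9 -> 8 over successive idle timeouts until it reaches
         * ioat_ring_alloc_order again (the numbers are illustrative)
         */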
        spin_unlock_bh(&chan->cleanup_lock);
/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
int ioat2_enumerate_channels(struct ioatdma_device *device)
        struct ioat2_dma_chan *ioat;
        struct device *dev = &device->pdev->dev;
        struct dma_device *dma = &device->common;

        INIT_LIST_HEAD(&dma->channels);
        dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
        dma->chancnt &= 0x1f; /* bits [4:0] valid */
        if (dma->chancnt > ARRAY_SIZE(device->idx)) {
                dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
                         dma->chancnt, ARRAY_SIZE(device->idx));
                dma->chancnt = ARRAY_SIZE(device->idx);
        }
        xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
        xfercap_log &= 0x1f; /* bits [4:0] valid */
        if (xfercap_log == 0)

        dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
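        /* xfercap_log is log2 of the largest transfer one descriptor can
         * carry; e.g. a value of 20 would mean 1 MiB per descriptor (the
         * actual value is read from the device above, this is illustrative)
         */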
        /* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
        if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
                dma->chancnt--;
#endif
        for (i = 0; i < dma->chancnt; i++) {
                ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);

                ioat_init_channel(device, &ioat->base, i,
                                  device->cleanup_tasklet,
                                  (unsigned long) ioat);
                ioat->xfercap_log = xfercap_log;
                spin_lock_init(&ioat->ring_lock);
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
        struct dma_chan *c = tx->chan;
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        dma_cookie_t cookie = c->cookie;

        dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

        if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
                mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
        ioat2_update_pending(ioat);
        spin_unlock_bh(&ioat->ring_lock);

        return cookie;
static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
        struct ioat_dma_descriptor *hw;
        struct ioat_ring_ent *desc;
        struct ioatdma_device *dma;

        dma = to_ioatdma_device(chan->device);
        hw = pci_pool_alloc(dma->dma_pool, flags, &phys);

        memset(hw, 0, sizeof(*hw));

        desc = kmem_cache_alloc(ioat2_cache, flags);
                pci_pool_free(dma->dma_pool, hw, phys);

        memset(desc, 0, sizeof(*desc));

        dma_async_tx_descriptor_init(&desc->txd, chan);
        desc->txd.tx_submit = ioat2_tx_submit_unlock;
        desc->txd.phys = phys;
static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
        struct ioatdma_device *dma;

        dma = to_ioatdma_device(chan->device);
        pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
        kmem_cache_free(ioat2_cache, desc);
static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
        struct ioat_ring_ent **ring;
        int descs = 1 << order;

        if (order > ioat_get_max_alloc_order())

        /* allocate the array to hold the software ring */
        ring = kcalloc(descs, sizeof(*ring), flags);

        for (i = 0; i < descs; i++) {
                ring[i] = ioat2_alloc_ring_ent(c, flags);
                        ioat2_free_ring_ent(ring[i], c);

                set_desc_id(ring[i], i);
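        /* link the hw descriptors into a circular chain: each entry points
         * at the next, and the last points back to the first
         */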
        for (i = 0; i < descs-1; i++) {
                struct ioat_ring_ent *next = ring[i+1];
                struct ioat_dma_descriptor *hw = ring[i]->hw;

                hw->next = next->txd.phys;
        }
        ring[i]->hw->next = ring[0]->txd.phys;
/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @chan: channel to be initialized
 */
int ioat2_alloc_chan_resources(struct dma_chan *c)
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioat_ring_ent **ring;
        /* have we already been set up? */
        if (ioat->ring)
                return 1 << ioat->alloc_order;
        /* Setup register to interrupt and write completion status on error */
        writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
                dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr);
                writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

        /* allocate a completion writeback area */
        /* doing 2 32bit writes to mmio since 1 64b write doesn't work */
        chan->completion = pci_pool_alloc(chan->device->completion_pool,
                                          GFP_KERNEL, &chan->completion_dma);
        if (!chan->completion)
                return -ENOMEM;

        memset(chan->completion, 0, sizeof(*chan->completion));
        writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
               chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
        writel(((u64) chan->completion_dma) >> 32,
               chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

        order = ioat_get_alloc_order();
        ring = ioat2_alloc_ring(c, order, GFP_KERNEL);

        spin_lock_bh(&ioat->ring_lock);
        ioat->alloc_order = order;
        spin_unlock_bh(&ioat->ring_lock);

        tasklet_enable(&chan->cleanup_task);
        ioat2_start_null_desc(ioat);

        return 1 << ioat->alloc_order;
bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
        /* reshape differs from normal ring allocation in that we want
         * to allocate a new software ring while only
         * extending/truncating the hardware ring
         */
        struct ioat_chan_common *chan = &ioat->base;
        struct dma_chan *c = &chan->common;
        const u16 curr_size = ioat2_ring_mask(ioat) + 1;
        const u16 active = ioat2_ring_active(ioat);
        const u16 new_size = 1 << order;
        struct ioat_ring_ent **ring;

        if (order > ioat_get_max_alloc_order())
                return false;

        /* double check that we have at least 1 free descriptor */
        if (active == curr_size)
                return false;

        /* when shrinking, verify that we can hold the current active
         * set in the new ring
         */
        if (active >= new_size)
                return false;

        /* allocate the array to hold the software ring */
        ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
        /* allocate/trim descriptors as needed */
        if (new_size > curr_size) {
                /* copy current descriptors to the new ring */
                for (i = 0; i < curr_size; i++) {
                        u16 curr_idx = (ioat->tail+i) & (curr_size-1);
                        u16 new_idx = (ioat->tail+i) & (new_size-1);

                        ring[new_idx] = ioat->ring[curr_idx];
                        set_desc_id(ring[new_idx], new_idx);
                }

                /* add new descriptors to the ring */
                for (i = curr_size; i < new_size; i++) {
                        u16 new_idx = (ioat->tail+i) & (new_size-1);

                        ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
                        if (!ring[new_idx]) {
                                u16 new_idx = (ioat->tail+i) & (new_size-1);

                                ioat2_free_ring_ent(ring[new_idx], c);
                        }
                        set_desc_id(ring[new_idx], new_idx);
                }

                /* hw link new descriptors */
                for (i = curr_size-1; i < new_size; i++) {
                        u16 new_idx = (ioat->tail+i) & (new_size-1);
                        struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
                        struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

                        hw->next = next->txd.phys;
                }
        } else {
                struct ioat_dma_descriptor *hw;
                struct ioat_ring_ent *next;

                /* copy current descriptors to the new ring, dropping the
                 * removed descriptors
                 */
                for (i = 0; i < new_size; i++) {
                        u16 curr_idx = (ioat->tail+i) & (curr_size-1);
                        u16 new_idx = (ioat->tail+i) & (new_size-1);

                        ring[new_idx] = ioat->ring[curr_idx];
                        set_desc_id(ring[new_idx], new_idx);
                }

                /* free deleted descriptors */
                for (i = new_size; i < curr_size; i++) {
                        struct ioat_ring_ent *ent;

                        ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
                        ioat2_free_ring_ent(ent, c);
                }

                /* fix up hardware ring */
                hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
                next = ring[(ioat->tail+new_size) & (new_size-1)];
                hw->next = next->txd.phys;
        }
        dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
                __func__, new_size);

        ioat->alloc_order = order;
/**
 * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
 * @idx: gets starting descriptor index on successful allocation
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
        struct ioat_chan_common *chan = &ioat->base;

        spin_lock_bh(&ioat->ring_lock);
        /* never allow the last descriptor to be consumed, we need at
         * least one free at all times to allow for on-the-fly ring
         * resizing.
         */
        while (unlikely(ioat2_ring_space(ioat) <= num_descs)) {
                if (reshape_ring(ioat, ioat->alloc_order + 1) &&
                    ioat2_ring_space(ioat) > num_descs)
                        break;

                if (printk_ratelimit())
                        dev_dbg(to_dev(chan),
                                "%s: ring full! num_descs: %d (%x:%x:%x)\n",
                                __func__, num_descs, ioat->head, ioat->tail,
                                ioat->issued);
                spin_unlock_bh(&ioat->ring_lock);

                /* progress reclaim in the allocation failure case we
                 * may be called under bh_disabled so we need to trigger
                 * the timer event directly
                 */
                spin_lock_bh(&chan->cleanup_lock);
                if (jiffies > chan->timer.expires &&
                    timer_pending(&chan->timer)) {
                        struct ioatdma_device *device = chan->device;

                        mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
                        spin_unlock_bh(&chan->cleanup_lock);
                        device->timer_fn((unsigned long) ioat);
                } else
                        spin_unlock_bh(&chan->cleanup_lock);
        }

        dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
                __func__, num_descs, ioat->head, ioat->tail, ioat->issued);

        *idx = ioat2_desc_alloc(ioat, num_descs);
        return 0; /* with ioat->ring_lock held */
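/* Note: on success the ring_lock acquired above is deliberately left held;
 * it is released by ioat2_tx_submit_unlock() when the client submits the
 * descriptor, which keeps slot allocation and cookie assignment in
 * submission order (see the comment at the end of the memcpy prep below).
 */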
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
                           dma_addr_t dma_src, size_t len, unsigned long flags)
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_dma_descriptor *hw;
        struct ioat_ring_ent *desc;
        dma_addr_t dst = dma_dest;
        dma_addr_t src = dma_src;
        size_t total_len = len;

        num_descs = ioat2_xferlen_to_descs(ioat, len);
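        /* a request larger than the channel's maximum transfer size
         * (1 << xfercap_log bytes) is split across multiple descriptors
         */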
        if (likely(num_descs) &&
            ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)

        for (i = 0; i < num_descs; i++) {
                size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

                desc = ioat2_get_ring_ent(ioat, idx + i);

                dump_desc_dbg(ioat, desc);
        }

        desc->txd.flags = flags;
        desc->len = total_len;
        hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
        hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
        hw->ctl_f.compl_write = 1;
        dump_desc_dbg(ioat, desc);
        /* we leave the channel locked to ensure in order submission */
/**
 * ioat2_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
void ioat2_free_chan_resources(struct dma_chan *c)
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioat_chan_common *chan = &ioat->base;
        struct ioatdma_device *device = chan->device;
        struct ioat_ring_ent *desc;
        const u16 total_descs = 1 << ioat->alloc_order;

        /* Before freeing channel resources first check
         * if they have been previously allocated for this channel.
         */

        tasklet_disable(&chan->cleanup_task);
        del_timer_sync(&chan->timer);
        device->cleanup_tasklet((unsigned long) ioat);

        /* Delay 100ms after reset to allow internal DMA logic to quiesce
         * before removing DMA descriptor resources.
         */
        writeb(IOAT_CHANCMD_RESET,
               chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version));
        mdelay(100);
        spin_lock_bh(&ioat->ring_lock);
        descs = ioat2_ring_space(ioat);
        dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
        for (i = 0; i < descs; i++) {
                desc = ioat2_get_ring_ent(ioat, ioat->head + i);
                ioat2_free_ring_ent(desc, c);
        }

        if (descs < total_descs)
                dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
                        total_descs - descs);

        for (i = 0; i < total_descs - descs; i++) {
                desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
                dump_desc_dbg(ioat, desc);
                ioat2_free_ring_ent(desc, c);
        }

        ioat->alloc_order = 0;
        pci_pool_free(device->completion_pool, chan->completion,
                      chan->completion_dma);
        spin_unlock_bh(&ioat->ring_lock);

        chan->last_completion = 0;
        chan->completion_dma = 0;
ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
                  dma_cookie_t *done, dma_cookie_t *used)
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
        struct ioatdma_device *device = ioat->base.device;

        if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
                return DMA_SUCCESS;

        device->cleanup_tasklet((unsigned long) ioat);

        return ioat_is_complete(c, cookie, done, used);
static ssize_t ring_size_show(struct dma_chan *c, char *page)
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

        return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
        struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

        /* ...taken outside the lock, no need to be precise */
        return sprintf(page, "%d\n", ioat2_ring_active(ioat));
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);
static struct attribute *ioat2_attrs[] = {
        &ring_size_attr.attr,
        &ring_active_attr.attr,
        &ioat_version_attr.attr,

struct kobj_type ioat2_ktype = {
        .sysfs_ops = &ioat_sysfs_ops,
        .default_attrs = ioat2_attrs,
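/* These attributes are registered per channel via ioat_kobject_add() in
 * ioat2_dma_probe() below; the exact sysfs location they appear under (for
 * example a "quickdata" directory beneath each DMA channel's device node)
 * is an assumption here, not something defined in this file.
 */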
int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
        struct pci_dev *pdev = device->pdev;
        struct dma_device *dma;
        struct ioat_chan_common *chan;

        device->enumerate_channels = ioat2_enumerate_channels;
        device->cleanup_tasklet = ioat2_cleanup_tasklet;
        device->timer_fn = ioat2_timer_event;
        device->self_test = ioat_dma_self_test;
        dma = &device->common;
        dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
        dma->device_issue_pending = ioat2_issue_pending;
        dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
        dma->device_free_chan_resources = ioat2_free_chan_resources;
        dma->device_is_tx_complete = ioat2_is_complete;

        err = ioat_probe(device);

        ioat_set_tcp_copy_break(2048);

        list_for_each_entry(c, &dma->channels, device_node) {
                chan = to_chan_common(c);
                writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
                       chan->reg_base + IOAT_DCACTRL_OFFSET);
        }

        err = ioat_register(device);

        ioat_kobject_add(device, &ioat2_ktype);

        device->dca = ioat2_dca_init(pdev, device->reg_base);