/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * BSD LICENSE
 *
 * Copyright(c) 2004-2009 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Support routines for v3+ hardware
 */
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include "registers.h"
#include "hw.h"
#include "dma.h"
#include "dma_v2.h"
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
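/*
 * Worked example (illustrative only): an 8-source xor is programmed with
 * src_cnt_to_hw(8) == 6 in the descriptor's src_cnt field, and the
 * cleanup path recovers the software view with src_cnt_to_sw(6) == 8.
 */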
/* provide a lookup table for setting the source address in the base or
 * extended descriptor of an xor or pq descriptor
 */
static const u8 xor_idx_to_desc = 0xe0;
static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
static const u8 pq_idx_to_desc = 0xf8;
static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
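/*
 * How to read these tables (a sketch of the intent): bit 'idx' of
 * xor_idx_to_desc/pq_idx_to_desc selects which of the two raw
 * descriptors holds source 'idx' (0 = base, 1 = extended), and
 * *_idx_to_field[idx] gives the u64 slot within that descriptor.  For
 * example, xor_idx_to_desc is 0xe0 (bits 5-7 set), so xor sources 0-4
 * stay in the base descriptor while sources 5-7 spill into the
 * extension, source 5 landing in field 0 (xor_idx_to_field[5]).
 */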
static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	return raw->field[xor_idx_to_field[idx]];
}
static void xor_set_src(struct ioat_raw_descriptor *descs[2],
			dma_addr_t addr, u32 offset, int idx)
{
	struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1];

	raw->field[xor_idx_to_field[idx]] = addr + offset;
}
static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx)
{
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	return raw->field[pq_idx_to_field[idx]];
}
static void pq_set_src(struct ioat_raw_descriptor *descs[2],
		       dma_addr_t addr, u32 offset, u8 coef, int idx)
{
	struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
	struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1];

	raw->field[pq_idx_to_field[idx]] = addr + offset;
	pq->coef[idx] = coef;
}
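/*
 * Note that the Galois-field multiplier coefficient always lands in the
 * base pq descriptor (descs[0]), even when the lookup tables route the
 * corresponding source address to the extended descriptor.
 */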
static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat,
			    struct ioat_ring_ent *desc, int idx)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct pci_dev *pdev = chan->device->pdev;
	size_t len = desc->len;
	size_t offset = len - desc->hw->size;
	struct dma_async_tx_descriptor *tx = &desc->txd;
	enum dma_ctrl_flags flags = tx->flags;

	switch (desc->hw->ctl_f.op) {
	case IOAT_OP_COPY:
		if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */
			ioat_dma_unmap(chan, flags, len, desc->hw);
		break;
	case IOAT_OP_FILL: {
		struct ioat_fill_descriptor *hw = desc->fill;

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, hw->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_XOR_VAL:
	case IOAT_OP_XOR: {
		struct ioat_xor_descriptor *xor = desc->xor;
		struct ioat_ring_ent *ext;
		struct ioat_xor_ext_descriptor *xor_ex = NULL;
		int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 5) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			xor_ex = ext->xor_ex;
		}

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) xor;
			descs[1] = (struct ioat_raw_descriptor *) xor_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = xor_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* dest is a source in xor validate operations */
			if (xor->ctl_f.op == IOAT_OP_XOR_VAL) {
				ioat_unmap(pdev, xor->dst_addr - offset, len,
					   PCI_DMA_TODEVICE, flags, 1);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP))
			ioat_unmap(pdev, xor->dst_addr - offset, len,
				   PCI_DMA_FROMDEVICE, flags, 1);
		break;
	}
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ: {
		struct ioat_pq_descriptor *pq = desc->pq;
		struct ioat_ring_ent *ext;
		struct ioat_pq_ext_descriptor *pq_ex = NULL;
		int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
		struct ioat_raw_descriptor *descs[2];
		int i;

		if (src_cnt > 3) {
			ext = ioat2_get_ring_ent(ioat, idx + 1);
			pq_ex = ext->pq_ex;
		}

		/* in the 'continue' case don't unmap the dests as sources */
		if (dmaf_p_disabled_continue(flags))
			src_cnt--;
		else if (dmaf_continue(flags))
			src_cnt -= 3;

		if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			descs[0] = (struct ioat_raw_descriptor *) pq;
			descs[1] = (struct ioat_raw_descriptor *) pq_ex;
			for (i = 0; i < src_cnt; i++) {
				dma_addr_t src = pq_get_src(descs, i);

				ioat_unmap(pdev, src - offset, len,
					   PCI_DMA_TODEVICE, flags, 0);
			}

			/* the dests are sources in pq validate operations */
			if (pq->ctl_f.op == IOAT_OP_XOR_VAL) {
				if (!(flags & DMA_PREP_PQ_DISABLE_P))
					ioat_unmap(pdev, pq->p_addr - offset,
						   len, PCI_DMA_TODEVICE, flags, 0);
				if (!(flags & DMA_PREP_PQ_DISABLE_Q))
					ioat_unmap(pdev, pq->q_addr - offset,
						   len, PCI_DMA_TODEVICE, flags, 0);
				break;
			}
		}

		if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (!(flags & DMA_PREP_PQ_DISABLE_P))
				ioat_unmap(pdev, pq->p_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
			if (!(flags & DMA_PREP_PQ_DISABLE_Q))
				ioat_unmap(pdev, pq->q_addr - offset, len,
					   PCI_DMA_BIDIRECTIONAL, flags, 1);
		}
		break;
	}
	default:
		dev_err(&pdev->dev, "%s: unknown op type: %#x\n",
			__func__, desc->hw->ctl_f.op);
		break;
	}
}
static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}
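/*
 * Put differently (per the lookup tables above): a base xor descriptor
 * addresses at most 5 sources and a base pq descriptor at most 3, so
 * larger operations consume a second "extended" ring slot that the
 * cleanup path below has to skip when walking the ring.
 */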
/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 *
 * The difference from the dma_v2.c __cleanup() is that this routine
 * handles extended descriptors and dma-unmapping raid operations.
 */
static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat->tail, i;
	u16 active;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		smp_read_barrier_depends();
		prefetch(ioat2_get_ring_ent(ioat, idx + i + 1));
		desc = ioat2_get_ring_ent(ioat, idx + i);
		dump_desc_dbg(ioat, desc);
		tx = &desc->txd;
		if (tx->cookie) {
			chan->completed_cookie = tx->cookie;
			ioat3_dma_unmap(ioat, desc, idx + i);
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}
	}
	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat->tail = idx + i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
	chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	/* 5 microsecond delay per pending descriptor */
	writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK),
	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}
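/*
 * The interrupt-delay write above is a coalescing hint: the channel is
 * asked to wait roughly 5 microseconds per still-pending descriptor
 * (clamped to IOAT_INTRDELAY_MASK) before raising the next completion
 * interrupt, the intent presumably being to batch completions on a busy
 * ring rather than interrupting per descriptor.
 */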
static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	spin_lock_bh(&chan->cleanup_lock);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);
	spin_unlock_bh(&chan->cleanup_lock);
}
static void ioat3_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	ioat3_cleanup(ioat);
	if (!test_bit(IOAT_RUN, &chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	dma_addr_t phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}
static void ioat3_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		dma_addr_t phys_complete;
		u64 status;

		status = ioat_chansts(chan);

		/* when halted due to errors check for channel
		 * programming errors before advancing the completion state
		 */
		if (is_ioat_halted(status)) {
			u32 chanerr;

			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
				__func__, chanerr);
			if (test_bit(IOAT_RUN, &chan->state))
				BUG_ON(is_ioat_bug(chanerr));
			else /* we never got off the ground */
				return;
		}

		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		spin_lock_bh(&chan->cleanup_lock);
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) {
			spin_lock_bh(&ioat->prep_lock);
			ioat3_restart_channel(ioat);
			spin_unlock_bh(&ioat->prep_lock);
		} else {
			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&chan->cleanup_lock);
	} else {
		u16 active;

		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		spin_lock_bh(&chan->cleanup_lock);
		spin_lock_bh(&ioat->prep_lock);
		active = ioat2_ring_active(ioat);
		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
			reshape_ring(ioat, ioat->alloc_order-1);
		spin_unlock_bh(&ioat->prep_lock);
		spin_unlock_bh(&chan->cleanup_lock);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}
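/*
 * Ring resizing in the idle branch above, in brief: when nothing is
 * pending the timer shrinks an oversized ring one order at a time via
 * reshape_ring(ioat, alloc_order - 1) and re-arms itself until the ring
 * is back at the default allocation order.
 */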
static enum dma_status
ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS)
		return DMA_SUCCESS;

	ioat3_cleanup(ioat);

	return ioat_tx_status(c, cookie, txstate);
}
static struct dma_async_tx_descriptor *
ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value,
		       size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	size_t total_len = len;
	struct ioat_fill_descriptor *fill;
	u64 src_data = (0x0101010101010101ULL) * (value & 0xff);
	int num_descs, idx, i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		fill = desc->fill;

		fill->size = xfer_size;
		fill->src_data = src_data;
		fill->dst_addr = dest;
		fill->ctl = 0;
		fill->ctl_f.op = IOAT_OP_FILL;

		len -= xfer_size;
		dest += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	fill->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}
static struct dma_async_tx_descriptor *
__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
		      dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt,
		      size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_xor_descriptor *xor;
	struct ioat_xor_ext_descriptor *xor_ex = NULL;
	struct ioat_dma_descriptor *hw;
	int num_descs, with_ext, idx, i;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR;

	BUG_ON(src_cnt < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 5
	 * sources
	 */
	if (src_cnt > 5) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);
		int s;

		desc = ioat2_get_ring_ent(ioat, idx + i);
		xor = desc->xor;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor xor_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + 1);
		xor_ex = ext->xor_ex;

		descs[0] = (struct ioat_raw_descriptor *) xor;
		descs[1] = (struct ioat_raw_descriptor *) xor_ex;
		for (s = 0; s < src_cnt; s++)
			xor_set_src(descs, src[s], offset, s);
		xor->size = xfer_size;
		xor->dst_addr = dest + offset;
		xor->ctl = 0;
		xor->ctl_f.op = op;
		xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt);

		len -= xfer_size;
		offset += xfer_size;
		dump_desc_dbg(ioat, desc);
	} while ((i += 1 + with_ext) < num_descs);

	/* last xor descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}
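/*
 * Descriptor accounting for the xor path, as a rough sketch (assuming a
 * 4KB per-descriptor transfer cap): a 16KB 8-source xor needs
 * ioat2_xferlen_to_descs() == 4 ring slots, doubled to 8 because more
 * than 5 sources forces an extended descriptor per segment, plus the
 * one trailing null descriptor that carries the ordered completion
 * write, i.e. ioat2_check_space_lock(ioat, 9).
 */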
static struct dma_async_tx_descriptor *
ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	       unsigned int src_cnt, size_t len, unsigned long flags)
{
	return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
}

struct dma_async_tx_descriptor *
ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		   unsigned int src_cnt, size_t len,
		   enum sum_check_flags *result, unsigned long flags)
{
	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	return __ioat3_prep_xor_lock(chan, result, src[0], &src[1],
				     src_cnt - 1, len, flags);
}
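/*
 * Example of the remapping above (illustrative): for a validate call
 * with src[] = { A, B, C, D }, buffer A becomes the xor-validate
 * "destination" and { B, C, D } the sources, i.e. the engine checks
 * that the four buffers xor to zero without writing a result buffer.
 */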
static void
dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext)
{
	struct device *dev = to_dev(&ioat->base);
	struct ioat_pq_descriptor *pq = desc->pq;
	struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL;
	struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
	int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
	int i;

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x"
		" sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n",
		desc_id(desc), (unsigned long long) desc->txd.phys,
		(unsigned long long) (pq_ex ? pq_ex->next : pq->next),
		desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en,
		pq->ctl_f.compl_write,
		pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
		pq->ctl_f.src_cnt);
	for (i = 0; i < src_cnt; i++)
		dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i,
			(unsigned long long) pq_get_src(descs, i), pq->coef[i]);
	dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
	dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
}
static struct dma_async_tx_descriptor *
__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
		     const dma_addr_t *dst, const dma_addr_t *src,
		     unsigned int src_cnt, const unsigned char *scf,
		     size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent *compl_desc;
	struct ioat_ring_ent *desc;
	struct ioat_ring_ent *ext;
	size_t total_len = len;
	struct ioat_pq_descriptor *pq;
	struct ioat_pq_ext_descriptor *pq_ex = NULL;
	struct ioat_dma_descriptor *hw;
	u32 offset = 0;
	u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ;
	int i, s, idx, with_ext, num_descs;

	dev_dbg(to_dev(chan), "%s\n", __func__);
	/* the engine requires at least two sources (we provide
	 * at least 1 implied source in the DMA_PREP_CONTINUE case)
	 */
	BUG_ON(src_cnt + dmaf_continue(flags) < 2);

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	/* we need 2x the number of descriptors to cover greater than 3
	 * sources (we need 1 extra source in the q-only continuation
	 * case and 3 extra sources in the p+q continuation case.
	 */
	if (src_cnt + dmaf_p_disabled_continue(flags) > 3 ||
	    (dmaf_continue(flags) && !dmaf_p_disabled_continue(flags))) {
		with_ext = 1;
		num_descs *= 2;
	} else
		with_ext = 0;

	/* completion writes from the raid engine may pass completion
	 * writes from the legacy engine, so we need one extra null
	 * (legacy) descriptor to ensure all completion writes arrive in
	 * order.
	 */
	if (likely(num_descs) &&
	    ioat2_check_space_lock(ioat, num_descs+1) == 0)
		idx = ioat->head;
	else
		return NULL;
	i = 0;
	do {
		struct ioat_raw_descriptor *descs[2];
		size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		pq = desc->pq;

		/* save a branch by unconditionally retrieving the
		 * extended descriptor pq_set_src() knows to not write
		 * to it in the single descriptor case
		 */
		ext = ioat2_get_ring_ent(ioat, idx + i + with_ext);
		pq_ex = ext->pq_ex;

		descs[0] = (struct ioat_raw_descriptor *) pq;
		descs[1] = (struct ioat_raw_descriptor *) pq_ex;

		for (s = 0; s < src_cnt; s++)
			pq_set_src(descs, src[s], offset, scf[s], s);

		/* see the comment for dma_maxpq in include/linux/dmaengine.h */
		if (dmaf_p_disabled_continue(flags))
			pq_set_src(descs, dst[1], offset, 1, s++);
		else if (dmaf_continue(flags)) {
			pq_set_src(descs, dst[0], offset, 0, s++);
			pq_set_src(descs, dst[1], offset, 1, s++);
			pq_set_src(descs, dst[1], offset, 0, s++);
		}
		pq->size = xfer_size;
		pq->p_addr = dst[0] + offset;
		pq->q_addr = dst[1] + offset;
		pq->ctl = 0;
		pq->ctl_f.op = op;
		pq->ctl_f.src_cnt = src_cnt_to_hw(s);
		pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
		pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);

		len -= xfer_size;
		offset += xfer_size;
	} while ((i += 1 + with_ext) < num_descs);

	/* last pq descriptor carries the unmap parameters and fence bit */
	desc->txd.flags = flags;
	desc->len = total_len;
	if (result)
		desc->result = result;
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	dump_pq_desc_dbg(ioat, desc, ext);

	/* completion descriptor carries interrupt bit */
	compl_desc = ioat2_get_ring_ent(ioat, idx + i);
	compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT;
	hw = compl_desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	dump_desc_dbg(ioat, compl_desc);

	/* we leave the channel locked to ensure in order submission */
	return &compl_desc->txd;
}
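/*
 * The continuation handling above follows the dma_maxpq() contract in
 * include/linux/dmaengine.h: when a P+Q operation is split across
 * calls, the previously computed P and/or Q blocks are appended as
 * implied extra sources (with the fixed coefficients shown) so the new
 * pass accumulates onto the earlier results instead of overwriting
 * them, which is also why those implied sources count against the
 * base descriptor's 3-source limit.
 */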
static struct dma_async_tx_descriptor *
ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	      unsigned int src_cnt, const unsigned char *scf, size_t len,
	      unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		dst[0] = dst[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		dst[1] = dst[0];

	/* handle the single source multiply case from the raid6
	 * recovery path
	 */
	if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
		dma_addr_t single_source[2];
		unsigned char single_source_coef[2];

		BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q);
		single_source[0] = src[0];
		single_source[1] = src[0];
		single_source_coef[0] = scf[0];
		single_source_coef[1] = 0;

		return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2,
					    single_source_coef, len, flags);
	} else
		return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf,
					    len, flags);
}
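/*
 * The single-source special case above exists because the engine
 * refuses fewer than two sources: a lone q-only multiply from the raid6
 * recovery path is padded by passing the same buffer twice, the second
 * copy with a zero coefficient so it does not disturb the result.
 */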
struct dma_async_tx_descriptor *
ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		  unsigned int src_cnt, const unsigned char *scf, size_t len,
		  enum sum_check_flags *pqres, unsigned long flags)
{
	/* specify valid address for disabled result */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		pq[0] = pq[1];
	if (flags & DMA_PREP_PQ_DISABLE_Q)
		pq[1] = pq[0];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*pqres = 0;

	return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
				    flags);
}
static struct dma_async_tx_descriptor *
ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		 unsigned int src_cnt, size_t len, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	memset(scf, 0, src_cnt);
	pq[0] = dst;
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = dst; /* specify valid address for disabled result */

	return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
				    flags);
}
struct dma_async_tx_descriptor *
ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		     unsigned int src_cnt, size_t len,
		     enum sum_check_flags *result, unsigned long flags)
{
	unsigned char scf[src_cnt];
	dma_addr_t pq[2];

	/* the cleanup routine only sets bits on validate failure, it
	 * does not clear bits on validate success... so clear it here
	 */
	*result = 0;

	memset(scf, 0, src_cnt);
	pq[0] = src[0];
	flags |= DMA_PREP_PQ_DISABLE_Q;
	pq[1] = pq[0]; /* specify valid address for disabled result */

	return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
				    len, flags);
}
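/*
 * Note on the two pqxor helpers above: when a device lacks the native
 * xor engine but has pq support, xor and xor-validate are emulated by a
 * pq operation with all coefficients zeroed and the q output disabled;
 * the p result of such an operation is simply the parity (xor) of the
 * sources.
 */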
static struct dma_async_tx_descriptor *
ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat2_check_space_lock(ioat, 1) == 0)
		desc = ioat2_get_ring_ent(ioat, ioat->head);
	else
		return NULL;

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;

	desc->txd.flags = flags;
	desc->len = 1;

	dump_desc_dbg(ioat, desc);

	/* we leave the channel locked to ensure in order submission */
	return &desc->txd;
}
static void __devinit ioat3_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}
#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */
static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[IOAT_NUM_SRC_TEST];
	struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1];
	dma_addr_t dma_addr, dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	u32 xor_val_result;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;

	dev_dbg(dev, "%s\n", __func__);

	if (!dma_has_cap(DMA_XOR, dma->cap_mask))
		return 0;

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}
	/* Fill in src buffers */
	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
			(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}
	/* test xor */
	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
				      DMA_PREP_INTERRUPT);

	if (!tx) {
		dev_err(dev, "Self-test xor prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test xor setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test xor timed out\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dev, "Self-test xor failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);

	/* skip validate if the capability is not present */
	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
		goto free_resources;

	/* validate the sources with the destination page */
	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
		xor_val_srcs[i] = xor_srcs[i];
	xor_val_srcs[i] = dest;
	xor_val_result = 1;

	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test zero prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test zero setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test validate timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (xor_val_result != 0) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}
	/* skip memset if the capability is not present */
	if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask))
		goto free_resources;

	/* test memset */
	dma_addr = dma_map_page(dev, dest, 0,
			PAGE_SIZE, DMA_FROM_DEVICE);
	tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test memset prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test memset setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test memset timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i]) {
			dev_err(dev, "Self-test memset failed compare\n");
			err = -ENODEV;
			goto free_resources;
		}
	}
	/* test for non-zero parity sum */
	xor_val_result = 0;
	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
					   DMA_TO_DEVICE);
	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
					  &xor_val_result, DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(dev, "Self-test 2nd zero prep failed\n");
		err = -ENODEV;
		goto free_resources;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat3_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test 2nd zero setup failed\n");
		err = -ENODEV;
		goto free_resources;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) {
		dev_err(dev, "Self-test 2nd validate timed out\n");
		err = -ENODEV;
		goto free_resources;
	}

	if (xor_val_result != SUM_CHECK_P_RESULT) {
		dev_err(dev, "Self-test validate failed compare\n");
		err = -ENODEV;
		goto free_resources;
	}
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	src_idx = IOAT_NUM_SRC_TEST;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}
static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
{
	int rc = ioat_dma_self_test(device);

	if (rc)
		return rc;

	rc = ioat_xor_val_self_test(device);
	if (rc)
		return rc;

	return 0;
}
static int ioat3_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *device = chan->device;
	struct pci_dev *pdev = device->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	/* -= IOAT ver.3 workarounds =- */
	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
	 * that can cause stability issues for IOAT ver.3, and clear any
	 * pending errors
	 */
	pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07);
	err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
	if (err) {
		dev_err(&pdev->dev, "channel error register unreachable\n");
		return err;
	}
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

	/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
	 * (workaround for spurious config parity error after restart)
	 */
	pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
	if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0)
		pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10);

	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}
int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	int dca_en = system_has_dca_enabled(pdev);
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	bool is_raid_device = false;
	int err;
	u32 cap;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat3_reset_hw;
	device->self_test = ioat3_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
	cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);

	/* dca is incompatible with raid operations */
	if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
		cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);

	if (cap & IOAT_CAP_XOR) {
		is_raid_device = true;

		dma_cap_set(DMA_XOR, dma->cap_mask);
		dma->device_prep_dma_xor = ioat3_prep_xor;

		dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
		dma->device_prep_dma_xor_val = ioat3_prep_xor_val;
	}
	if (cap & IOAT_CAP_PQ) {
		is_raid_device = true;
		dma_set_maxpq(dma, 8, 0);

		dma_cap_set(DMA_PQ, dma->cap_mask);
		dma->device_prep_dma_pq = ioat3_prep_pq;

		dma_cap_set(DMA_PQ_VAL, dma->cap_mask);
		dma->device_prep_dma_pq_val = ioat3_prep_pq_val;

		if (!(cap & IOAT_CAP_XOR)) {
			dma_cap_set(DMA_XOR, dma->cap_mask);
			dma->device_prep_dma_xor = ioat3_prep_pqxor;

			dma_cap_set(DMA_XOR_VAL, dma->cap_mask);
			dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val;
		}
	}
	if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) {
		dma_cap_set(DMA_MEMSET, dma->cap_mask);
		dma->device_prep_dma_memset = ioat3_prep_memset_lock;
	}
	if (is_raid_device) {
		dma->device_tx_status = ioat3_tx_status;
		device->cleanup_fn = ioat3_cleanup_event;
		device->timer_fn = ioat3_timer_event;
	} else {
		dma->device_tx_status = ioat_dma_tx_status;
		device->cleanup_fn = ioat2_cleanup_event;
		device->timer_fn = ioat2_timer_event;
	}

	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
	dma->device_prep_dma_pq_val = NULL;
	#endif

	#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
	dma->device_prep_dma_xor_val = NULL;
	#endif
	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(262144);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat3_dca_init(pdev, device->reg_base);