/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include "iwch_provider.h"
#include "cxio_resource.h"
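/*
 * WQE builders: each helper translates an ib_send_wr/ib_recv_wr into the
 * corresponding T3 work request image and reports the WQE size in 8-byte
 * flits via *flit_cnt.  build_rdma_send() handles the SEND variants
 * (plain, solicited-event, and send-with-invalidate).
 */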
static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
			   u8 *flit_cnt)
{
	int i;
	u32 plen = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.rdmaop = T3_SEND_WITH_SE;
		else
			wqe->send.rdmaop = T3_SEND;
		wqe->send.rem_stag = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
		else
			wqe->send.rdmaop = T3_SEND_WITH_INV;
		wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}
	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->send.reserved[0] = 0;
	wqe->send.reserved[1] = 0;
	wqe->send.reserved[2] = 0;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) < plen)
			return -EMSGSIZE;	/* 32-bit payload length would wrap */
		plen += wr->sg_list[i].length;
		wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
		wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
		wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
	}
	wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
	*flit_cnt = 4 + ((wr->num_sge) << 1);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}
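/*
 * Build an RDMA WRITE WQE: the sink STag/address come from the work
 * request; the payload is either the 4-byte immediate or the local
 * gather list.
 */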
static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
			    u8 *flit_cnt)
{
	int i;
	u32 plen;

	if (wr->num_sge > T3_MAX_SGE)
		return -EINVAL;
	wqe->write.rdmaop = T3_RDMA_WRITE;
	wqe->write.reserved[0] = 0;
	wqe->write.reserved[1] = 0;
	wqe->write.reserved[2] = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);

	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		plen = 4;
		wqe->write.sgl[0].stag = wr->ex.imm_data;
		wqe->write.sgl[0].len = cpu_to_be32(0);
		wqe->write.num_sgle = cpu_to_be32(0);
		*flit_cnt = 6;
	} else {
		plen = 0;
		for (i = 0; i < wr->num_sge; i++) {
			if ((plen + wr->sg_list[i].length) < plen) {
				return -EMSGSIZE;
			}
			plen += wr->sg_list[i].length;
			wqe->write.sgl[i].stag =
				cpu_to_be32(wr->sg_list[i].lkey);
			wqe->write.sgl[i].len =
				cpu_to_be32(wr->sg_list[i].length);
			wqe->write.sgl[i].to =
				cpu_to_be64(wr->sg_list[i].addr);
		}
		wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
		*flit_cnt = 5 + ((wr->num_sge) << 1);
	}
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}
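/*
 * Build an RDMA READ REQ WQE.  T3 read requests use a single local SGE;
 * the READ_WITH_INV variant also asks for local STag invalidation.
 */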
static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
			   u8 *flit_cnt)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	wqe->read.rdmaop = T3_READ_REQ;
	if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
		wqe->read.local_inv = 1;
	else
		wqe->read.local_inv = 0;
	wqe->read.reserved[0] = 0;
	wqe->read.reserved[1] = 0;
	wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
	wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
	wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
	wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
	*flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
	return 0;
}
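/*
 * Build a fast-register WQE.  If the page list does not fit in a single
 * WQE, a second PBL-fragment WQE is built in the next SQ slot and
 * *wr_cnt is bumped so the caller advances the queue accordingly.
 */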
static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
			 u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
{
	int i;
	__be64 *p;

	if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH)
		return -EINVAL;
	*wr_cnt = 1;
	wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fastreg.va_base_lo_fbo =
		cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff);
	wqe->fastreg.page_type_perms = cpu_to_be32(
		V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) |
		V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift-12) |
		V_FR_TYPE(TPT_VATO) |
		V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags)));
	p = &wqe->fastreg.pbl_addrs[0];
	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) {

		/* If we need a 2nd WR, then set it up */
		if (i == T3_MAX_FASTREG_FRAG) {
			*wr_cnt = 2;
			wqe = (union t3_wr *)(wq->queue +
				Q_PTR2IDX((wq->wptr+1), wq->size_log2));
			build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
				Q_GENBIT(wq->wptr + 1, wq->size_log2),
				0, 1 + wr->wr.fast_reg.page_list_len - T3_MAX_FASTREG_FRAG,
				T3_EOP);

			p = &wqe->pbl_frag.pbl_addrs[0];
		}
		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
	}
	*flit_cnt = 5 + wr->wr.fast_reg.page_list_len;
	if (*flit_cnt > 15)
		*flit_cnt = 15;
	return 0;
}
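/* Build a local-invalidate WQE for the rkey named in the work request. */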
static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
			  u8 *flit_cnt)
{
	wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->local_inv.reserved = 0;
	*flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
	return 0;
}
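/*
 * Validate each SGE against the MR it references and translate the
 * lkey/address into an adapter PBL index and page size for use in a
 * receive WQE.
 */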
static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
			    u32 num_sgle, u32 * pbl_addr, u8 * page_size)
{
	int i;
	struct iwch_mr *mhp;
	u64 offset;
	for (i = 0; i < num_sgle; i++) {
		mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
		if (!mhp) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (!mhp->attr.state) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (mhp->attr.zbva) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EIO;
		}
		if (sg_list[i].addr < mhp->attr.va_fbo) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) <
		    sg_list[i].addr) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		if (sg_list[i].addr + ((u64) sg_list[i].length) >
		    mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
			PDBG("%s %d\n", __func__, __LINE__);
			return -EINVAL;
		}
		offset = sg_list[i].addr - mhp->attr.va_fbo;
		offset += mhp->attr.va_fbo &
			  ((1UL << (12 + mhp->attr.page_size)) - 1);
		pbl_addr[i] = ((mhp->attr.pbl_addr -
			       rhp->rdev.rnic_info.pbl_base) >> 3) +
			      (offset >> (12 + mhp->attr.page_size));
		page_size[i] = mhp->attr.page_size;
	}
	return 0;
}
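/* Build a receive WQE whose SGEs reference registered (non-zero stag) MRs. */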
static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
			   struct ib_recv_wr *wr)
{
	int i, err = 0;
	u32 pbl_addr[T3_MAX_SGE];
	u8 page_size[T3_MAX_SGE];

	err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
			       page_size);
	if (err)
		return err;
	wqe->recv.pagesz[0] = page_size[0];
	wqe->recv.pagesz[1] = page_size[1];
	wqe->recv.pagesz[2] = page_size[2];
	wqe->recv.pagesz[3] = page_size[3];
	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
	for (i = 0; i < wr->num_sge; i++) {
		wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);

		/* to in the WQE == the offset into the page */
		wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
				((1UL << (12 + page_size[i])) - 1));

		/* pbl_addr is the adapter's address in the PBL */
		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
	}
	for (; i < T3_MAX_SGE; i++) {
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = 0;
		wqe->recv.sgl[i].to = 0;
		wqe->recv.pbl_addr[i] = 0;
	}
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
		     qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
		     qhp->wq.rq_size_log2)].pbl_addr = 0;
	return 0;
}
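/*
 * Build a receive WQE whose SGEs use the reserved zero STag; see the
 * comment below on how the PBL for it is allocated and handed to the uP.
 */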
static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
				struct ib_recv_wr *wr)
{
	int i;
	u32 pbl_addr;
	u32 pbl_offset;

	/*
	 * The T3 HW requires the PBL in the HW recv descriptor to reference
	 * a PBL entry.  So we allocate the max needed PBL memory here and
	 * pass it to the uP in the recv WR.  The uP will build the PBL and
	 * setup the HW recv descriptor.
	 */
	pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
	if (!pbl_addr)
		return -ENOMEM;

	/*
	 * Compute the 8B aligned offset.
	 */
	pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;

	wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);

	for (i = 0; i < wr->num_sge; i++) {

		/*
		 * Use a 128MB page size.  This and an imposed 128MB
		 * sge length limit allow us to require only a 2-entry HW
		 * PBL for each SGE.  This restriction is acceptable since
		 * it is not possible to allocate 128MB of contiguous
		 * DMA coherent memory!
		 */
		if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN)
			return -EINVAL;
		wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;

		/*
		 * T3 restricts a recv to all zero-stag or all non-zero-stag.
		 */
		if (wr->sg_list[i].lkey != 0)
			return -EINVAL;
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
		wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
		wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
		pbl_offset += 2;
	}
	for (; i < T3_MAX_SGE; i++) {
		wqe->recv.pagesz[i] = 0;
		wqe->recv.sgl[i].stag = 0;
		wqe->recv.sgl[i].len = 0;
		wqe->recv.sgl[i].to = 0;
		wqe->recv.pbl_addr[i] = 0;
	}
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
		     qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
	qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
		     qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
	return 0;
}
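/*
 * Post one or more send work requests.  For each WR: pick the T3 opcode,
 * build the WQE with the helpers above, record the request in the
 * software SQ, write the FW header, and finally ring the doorbell.
 */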
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 uninitialized_var(t3_wr_flit_cnt);
	enum t3_wr_opcode t3_wr_opcode = 0;
	enum t3_wr_flags t3_wr_flags;
	struct iwch_qp *qhp;
	u32 idx;
	union t3_wr *wqe;
	u32 num_wrs;
	unsigned long flag;
	struct t3_swsq *sqp;
	int wr_cnt = 1;

	qhp = to_iwch_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -EINVAL;
		goto out;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
			    qhp->wq.sq_size_log2);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = -ENOMEM;
		goto out;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			break;
		}
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		t3_wr_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			t3_wr_flags |= T3_COMPLETION_FLAG;
		sqp = qhp->wq.sq +
		      Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
		switch (wr->opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				t3_wr_flags |= T3_READ_FENCE_FLAG;
			t3_wr_opcode = T3_WR_SEND;
			err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			t3_wr_opcode = T3_WR_WRITE;
			err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			t3_wr_opcode = T3_WR_READ;
			t3_wr_flags = 0; /* T3 reads are always signaled */
			err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
			if (err)
				break;
			sqp->read_len = wqe->read.local_len;
			if (!qhp->wq.oldest_read)
				qhp->wq.oldest_read = sqp;
			break;
		case IB_WR_FAST_REG_MR:
			t3_wr_opcode = T3_WR_FASTREG;
			err = build_fastreg(wqe, wr, &t3_wr_flit_cnt,
					    &wr_cnt, &qhp->wq);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
			t3_wr_opcode = T3_WR_INV_STAG;
			err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err)
			break;
		wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
		sqp->wr_id = wr->wr_id;
		sqp->opcode = wr2opcode(t3_wr_opcode);
		sqp->sq_wptr = qhp->wq.sq_wptr;
		sqp->complete = 0;
		sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);

		build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, t3_wr_flit_cnt,
			       (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
		PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
		     __func__, (unsigned long long) wr->wr_id, idx,
		     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
		     sqp->opcode);
		wr = wr->next;
		num_wrs--;
		qhp->wq.wptr += wr_cnt;
		++(qhp->wq.sq_wptr);
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	if (cxio_wq_db_enabled(&qhp->wq))
		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

out:
	if (err)
		*bad_wr = wr;
	return err;
}
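/*
 * Post receive work requests.  Each WR becomes a T3_WR_RCV WQE built by
 * build_rdma_recv() or build_zero_stag_recv(), depending on whether the
 * first SGE carries a real lkey or the zero STag.
 */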
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct iwch_qp *qhp = to_iwch_qp(ibqp);
	union t3_wr *wqe;
	u32 idx, num_wrs;
	unsigned long flag;
	int err = 0;

	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
			    qhp->wq.rq_size_log2) - 1;
	while (wr) {
		if (num_wrs == 0 || wr->num_sge > T3_MAX_SGE) {
			err = num_wrs ? -EINVAL : -ENOMEM;
			break;
		}
		idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
		wqe = (union t3_wr *) (qhp->wq.queue + idx);
		if (wr->sg_list[0].lkey)
			err = build_rdma_recv(qhp, wqe, wr);
		else
			err = build_zero_stag_recv(qhp, wqe, wr);
		if (err)
			break;
		build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
			       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
			       0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
		PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rq_rptr 0x%x "
		     "wqe %p\n", __func__, (unsigned long long) wr->wr_id,
		     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
		++(qhp->wq.rq_wptr);
		++(qhp->wq.wptr);
		wr = wr->next;
		num_wrs--;
	}
	spin_unlock_irqrestore(&qhp->lock, flag);
	if (cxio_wq_db_enabled(&qhp->wq))
		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
	if (err)
		*bad_wr = wr;
	return err;
}
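/*
 * Post a memory-window bind as a T3_WR_BIND WQE on the send queue.  The
 * MR backing the bind is validated and translated to a PBL address via
 * iwch_sgl2pbl_map() before the WQE is written and the doorbell rung.
 */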
int iwch_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	struct iwch_dev *rhp;
	struct iwch_mw *mhp;
	struct iwch_qp *qhp;
	union t3_wr *wqe;
	u32 pbl_addr;
	u8 page_size;
	u32 num_wrs;
	unsigned long flag;
	struct ib_sge sgl;
	int err = 0;
	enum t3_wr_flags t3_wr_flags;
	u32 idx;
	struct t3_swsq *sqp;

	qhp = to_iwch_qp(qp);
	mhp = to_iwch_mw(mw);
	rhp = qhp->rhp;

	spin_lock_irqsave(&qhp->lock, flag);
	if (qhp->attr.state > IWCH_QP_STATE_RTS) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
			    qhp->wq.sq_size_log2);
	if ((num_wrs) <= 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
	PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
	     mw, mw_bind);
	wqe = (union t3_wr *) (qhp->wq.queue + idx);

	t3_wr_flags = 0;
	if (mw_bind->send_flags & IB_SEND_SIGNALED)
		t3_wr_flags = T3_COMPLETION_FLAG;

	sgl.addr = mw_bind->addr;
	sgl.lkey = mw_bind->mr->lkey;
	sgl.length = mw_bind->length;
	wqe->bind.reserved = 0;
	wqe->bind.type = TPT_VATO;

	/* TBD: check perms */
	wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
	wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
	wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
	wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
	err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
	if (err) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return err;
	}
	wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
	sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
	sqp->wr_id = mw_bind->wr_id;
	sqp->opcode = T3_BIND_MW;
	sqp->sq_wptr = qhp->wq.sq_wptr;
	sqp->complete = 0;
	sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
	wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
	wqe->bind.mr_pagesz = page_size;
	build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
		       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
		       sizeof(struct t3_bind_mw_wr) >> 3, T3_SOPEOP);
	++(qhp->wq.wptr);
	++(qhp->wq.sq_wptr);
	spin_unlock_irqrestore(&qhp->lock, flag);

	if (cxio_wq_db_enabled(&qhp->wq))
		ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

	return err;
}
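/*
 * Translate a CQE error status (plus the opcode it occurred on) into the
 * RDMAP/DDP layer and error code fields carried in a TERMINATE message.
 */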
static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
				    u8 *layer_type, u8 *ecode)
{
	int status = TPT_ERR_INTERNAL_ERR;
	int tagged = 0;
	int opcode = -1;
	int rqtype = 0;
	int send_inv = 0;

	if (rsp_msg) {
		status = CQE_STATUS(rsp_msg->cqe);
		opcode = CQE_OPCODE(rsp_msg->cqe);
		rqtype = RQ_TYPE(rsp_msg->cqe);
		send_inv = (opcode == T3_SEND_WITH_INV) ||
			   (opcode == T3_SEND_WITH_SE_INV);
		tagged = (opcode == T3_RDMA_WRITE) ||
			 (rqtype && (opcode == T3_READ_RESP));
	}

	switch (status) {
	case TPT_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case TPT_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == T3_SEND_WITH_INV) ||
		    (opcode == T3_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case TPT_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case TPT_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case TPT_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case TPT_ERR_INVALIDATE_SHARED_MR:
	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case TPT_ERR_ECC:
	case TPT_ERR_ECC_PSTAG:
	case TPT_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case TPT_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case TPT_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case TPT_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case TPT_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case TPT_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case TPT_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case TPT_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case TPT_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case TPT_ERR_MSN:
	case TPT_ERR_MSN_GAP:
	case TPT_ERR_MSN_RANGE:
	case TPT_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case TPT_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case TPT_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}
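/*
 * Build a 0-byte RDMA READ WQE in an skb and hand it directly to the uP
 * via the offload path, bypassing the send queue; the STag and addresses
 * are dummy values since no data is transferred.
 */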
int iwch_post_zb_read(struct iwch_qp *qhp)
{
	union t3_wr *wqe;
	struct sk_buff *skb;
	u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;

	PDBG("%s enter\n", __func__);
	skb = alloc_skb(40, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
		return -ENOMEM;
	}
	wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr));
	memset(wqe, 0, sizeof(struct t3_rdma_read_wr));
	wqe->read.rdmaop = T3_READ_REQ;
	wqe->read.reserved[0] = 0;
	wqe->read.reserved[1] = 0;
	wqe->read.rem_stag = cpu_to_be32(1);
	wqe->read.rem_to = cpu_to_be64(1);
	wqe->read.local_stag = cpu_to_be32(1);
	wqe->read.local_len = cpu_to_be32(0);
	wqe->read.local_to = cpu_to_be64(1);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)|
						V_FW_RIWR_LEN(flit_cnt));
	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}
/*
 * This posts a TERMINATE with layer=RDMA, type=catastrophic.
 */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
{
	union t3_wr *wqe;
	struct terminate_message *term;
	struct sk_buff *skb;

	PDBG("%s %d\n", __func__, __LINE__);
	skb = alloc_skb(40, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s cannot send TERMINATE!\n", __func__);
		return -ENOMEM;
	}
	wqe = (union t3_wr *)skb_put(skb, 40);
	memset(wqe, 0, 40);
	wqe->send.rdmaop = T3_TERMINATE;

	/* immediate data length */
	wqe->send.plen = htonl(4);

	/* immediate data starts here. */
	term = (struct terminate_message *)wqe->send.sgl;
	build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
	wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
			V_FW_RIWR_FLAGS(T3_COMPLETION_FLAG | T3_NOTIFY_FLAG));
	wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid));
	skb->priority = CPL_PRIORITY_DATA;
	return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}
/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
	struct iwch_cq *rchp, *schp;
	int count;
	int flushed;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
	/* take a ref on the qhp since we must release the lock */
	atomic_inc(&qhp->refcnt);
	spin_unlock_irqrestore(&qhp->lock, *flag);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, *flag);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&rchp->cq);
	cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, *flag);
	if (flushed)
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, *flag);
	spin_lock(&qhp->lock);
	cxio_flush_hw_cq(&schp->cq);
	cxio_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, *flag);
	if (flushed)
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);

	/* deref */
	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);

	spin_lock_irqsave(&qhp->lock, *flag);
}
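/*
 * For user QPs the WQ is simply marked in error (the user library does
 * the flush); kernel QPs are flushed here via __flush_qp().
 */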
static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
	if (qhp->ibqp.uobject)
		cxio_set_wq_in_error(&qhp->wq);
	else
		__flush_qp(qhp, flag);
}
/* Return count of RECV WRs posted */
u16 iwch_rqes_posted(struct iwch_qp *qhp)
{
	union t3_wr *wqe = qhp->wq.queue;
	u16 count = 0;

	while ((count+1) != 0 &&
	       fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
		count++;
		wqe++;
	}
	PDBG("%s qhp %p count %u\n", __func__, qhp, count);
	return count;
}
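/*
 * Fill a t3_rdma_init_attr from the QP and endpoint attributes and issue
 * the RDMA_INIT command to the adapter.
 */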
static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
		     enum iwch_qp_attr_mask mask,
		     struct iwch_qp_attributes *attrs)
{
	struct t3_rdma_init_attr init_attr;
	int ret;

	init_attr.tid = qhp->ep->hwtid;
	init_attr.qpid = qhp->wq.qpid;
	init_attr.pdid = qhp->attr.pd;
	init_attr.scqid = qhp->attr.scq;
	init_attr.rcqid = qhp->attr.rcq;
	init_attr.rq_addr = qhp->wq.rq_addr;
	init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
	init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
		qhp->attr.mpa_attr.recv_marker_enabled |
		(qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
		(qhp->attr.mpa_attr.crc_enabled << 2);

	init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
			   uP_RI_QP_RDMA_WRITE_ENABLE |
			   uP_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
				    uP_RI_QP_FAST_REGISTER_ENABLE;

	init_attr.tcp_emss = qhp->ep->emss;
	init_attr.ord = qhp->attr.max_ord;
	init_attr.ird = qhp->attr.max_ird;
	init_attr.qp_dma_addr = qhp->wq.dma_addr;
	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
	init_attr.rqe_count = iwch_rqes_posted(qhp);
	init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
	init_attr.chan = qhp->ep->l2t->smt_idx;
	if (peer2peer) {
		init_attr.rtr_type = RTR_READ;
		if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
			init_attr.ord = 1;
		if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
			init_attr.ird = 1;
	} else
		init_attr.rtr_type = 0;
	init_attr.irs = qhp->ep->rcv_seq;
	PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
	     "flags 0x%x qpcaps 0x%x\n", __func__,
	     init_attr.rq_addr, init_attr.rq_size,
	     init_attr.flags, init_attr.qpcaps);
	ret = cxio_rdma_init(&rhp->rdev, &init_attr);
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}
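/*
 * QP state machine: apply attribute changes (only legal in IDLE) and
 * drive IDLE/RTS/CLOSING/TERMINATE/ERROR transitions.  Work that cannot
 * be done under the QP lock (posting a TERMINATE, disconnecting or
 * dereferencing the endpoint) is deferred until the lock is dropped.
 */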
int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
		   enum iwch_qp_attr_mask mask,
		   struct iwch_qp_attributes *attrs,
		   int internal)
{
	struct iwch_qp_attributes newattr = qhp->attr;
	struct iwch_ep *ep = NULL;

	PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
	     (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	spin_lock_irqsave(&qhp->lock, flag);

	/* Process attr changes if in IDLE */
	if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & IWCH_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord >
			    rhp->attr.max_rdma_read_qp_depth) {
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & IWCH_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird >
			    rhp->attr.max_rdma_reads_per_qp) {
			newattr.max_ird = attrs->max_ird;
		}

	if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
	if (qhp->attr.state == attrs->next_state)

	switch (qhp->attr.state) {
	case IWCH_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_RTS:
			if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
			if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			qhp->attr.state = IWCH_QP_STATE_RTS;

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			get_ep(&qhp->ep->com);
			spin_unlock_irqrestore(&qhp->lock, flag);
			ret = rdma_init(rhp, qhp, mask, attrs);
			spin_lock_irqsave(&qhp->lock, flag);
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
			flush_qp(qhp, &flag);
	case IWCH_QP_STATE_RTS:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			qhp->attr.state = IWCH_QP_STATE_CLOSING;
		case IWCH_QP_STATE_TERMINATE:
			qhp->attr.state = IWCH_QP_STATE_TERMINATE;
			if (qhp->ibqp.uobject)
				cxio_set_wq_in_error(&qhp->wq);
		case IWCH_QP_STATE_ERROR:
			qhp->attr.state = IWCH_QP_STATE_ERROR;
	case IWCH_QP_STATE_CLOSING:
		switch (attrs->next_state) {
		case IWCH_QP_STATE_IDLE:
			flush_qp(qhp, &flag);
			qhp->attr.state = IWCH_QP_STATE_IDLE;
			qhp->attr.llp_stream_handle = NULL;
			put_ep(&qhp->ep->com);
			wake_up(&qhp->wait);
		case IWCH_QP_STATE_ERROR:
	case IWCH_QP_STATE_ERROR:
		if (attrs->next_state != IWCH_QP_STATE_IDLE) {
		if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
		    !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
		qhp->attr.state = IWCH_QP_STATE_IDLE;
	case IWCH_QP_STATE_TERMINATE:
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	qhp->attr.state = IWCH_QP_STATE_ERROR;
	wake_up(&qhp->wait);
	flush_qp(qhp, &flag);
	spin_unlock_irqrestore(&qhp->lock, flag);
		iwch_post_terminate(qhp, NULL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
		iwch_ep_disconnect(ep, abort, GFP_KERNEL);

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
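/*
 * Quiesce/resume helpers: pause or restart the HW TID for a QP and track
 * the state in qhp->flags; iwch_quiesce_qps()/iwch_resume_qps() below
 * apply this to every QP associated with a CQ.
 */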
static int quiesce_qp(struct iwch_qp *qhp)
{
	spin_lock_irq(&qhp->lock);
	iwch_quiesce_tid(qhp->ep);
	qhp->flags |= QP_QUIESCED;
	spin_unlock_irq(&qhp->lock);
	return 0;
}

static int resume_qp(struct iwch_qp *qhp)
{
	spin_lock_irq(&qhp->lock);
	iwch_resume_tid(qhp->ep);
	qhp->flags &= ~QP_QUIESCED;
	spin_unlock_irq(&qhp->lock);
	return 0;
}
int iwch_quiesce_qps(struct iwch_cq *chp)
{
	int i;
	struct iwch_qp *qhp;

	for (i=0; i < T3_MAX_NUM_QP; i++) {
		qhp = get_qhp(chp->rhp, i);
		if (!qhp)
			continue;
		if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
			quiesce_qp(qhp);
			continue;
		}
		if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
			quiesce_qp(qhp);
	}
	return 0;
}
int iwch_resume_qps(struct iwch_cq *chp)
{
	int i;
	struct iwch_qp *qhp;

	for (i=0; i < T3_MAX_NUM_QP; i++) {
		qhp = get_qhp(chp->rhp, i);
		if (!qhp)
			continue;
		if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
			resume_qp(qhp);
			continue;
		}
		if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
			resume_qp(qhp);
	}
	return 0;
}