drivers/infiniband/hw/cxgb3/iwch_qp.c
/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 * Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "iwch_provider.h"
#include "iwch.h"
#include "iwch_cm.h"
#include "cxio_hal.h"

#define NO_SUPPORT -1

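/*
 * Build the SEND fields of a T3 work request from an ib_send_wr:
 * opcode, remote stag, the SGL (or 4 bytes of immediate data), and the
 * payload length.  Returns the WQE size in flits via *flit_cnt.
 */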
static inline int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
                                       u8 *flit_cnt)
{
        int i;
        u32 plen;

        switch (wr->opcode) {
        case IB_WR_SEND:
        case IB_WR_SEND_WITH_IMM:
                if (wr->send_flags & IB_SEND_SOLICITED)
                        wqe->send.rdmaop = T3_SEND_WITH_SE;
                else
                        wqe->send.rdmaop = T3_SEND;
                wqe->send.rem_stag = 0;
                break;
#if 0                           /* Not currently supported */
        case TYPE_SEND_INVALIDATE:
        case TYPE_SEND_INVALIDATE_IMMEDIATE:
                wqe->send.rdmaop = T3_SEND_WITH_INV;
                wqe->send.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
                break;
        case TYPE_SEND_SE_INVALIDATE:
                wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
                wqe->send.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
                break;
#endif
        default:
                break;
        }
        if (wr->num_sge > T3_MAX_SGE)
                return -EINVAL;
        wqe->send.reserved[0] = 0;
        wqe->send.reserved[1] = 0;
        wqe->send.reserved[2] = 0;
        if (wr->opcode == IB_WR_SEND_WITH_IMM) {
                plen = 4;
                wqe->send.sgl[0].stag = wr->imm_data;
                wqe->send.sgl[0].len = __constant_cpu_to_be32(0);
                wqe->send.num_sgle = __constant_cpu_to_be32(0);
                *flit_cnt = 5;
        } else {
                plen = 0;
                for (i = 0; i < wr->num_sge; i++) {
                        if ((plen + wr->sg_list[i].length) < plen) {
                                return -EMSGSIZE;
                        }
                        plen += wr->sg_list[i].length;
                        wqe->send.sgl[i].stag =
                            cpu_to_be32(wr->sg_list[i].lkey);
                        wqe->send.sgl[i].len =
                            cpu_to_be32(wr->sg_list[i].length);
                        wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
                }
                wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
                *flit_cnt = 4 + ((wr->num_sge) << 1);
        }
        wqe->send.plen = cpu_to_be32(plen);
        return 0;
}

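/*
 * Build an RDMA WRITE work request: sink stag/TO from the rdma wr, plus
 * the SGL or immediate data, payload length, and flit count.
 */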
static inline int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
                                        u8 *flit_cnt)
{
        int i;
        u32 plen;
        if (wr->num_sge > T3_MAX_SGE)
                return -EINVAL;
        wqe->write.rdmaop = T3_RDMA_WRITE;
        wqe->write.reserved[0] = 0;
        wqe->write.reserved[1] = 0;
        wqe->write.reserved[2] = 0;
        wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
        wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);

        if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
                plen = 4;
                wqe->write.sgl[0].stag = wr->imm_data;
                wqe->write.sgl[0].len = __constant_cpu_to_be32(0);
                wqe->write.num_sgle = __constant_cpu_to_be32(0);
                *flit_cnt = 6;
        } else {
                plen = 0;
                for (i = 0; i < wr->num_sge; i++) {
                        if ((plen + wr->sg_list[i].length) < plen) {
                                return -EMSGSIZE;
                        }
                        plen += wr->sg_list[i].length;
                        wqe->write.sgl[i].stag =
                            cpu_to_be32(wr->sg_list[i].lkey);
                        wqe->write.sgl[i].len =
                            cpu_to_be32(wr->sg_list[i].length);
                        wqe->write.sgl[i].to =
                            cpu_to_be64(wr->sg_list[i].addr);
                }
                wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
                *flit_cnt = 5 + ((wr->num_sge) << 1);
        }
        wqe->write.plen = cpu_to_be32(plen);
        return 0;
}

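/*
 * Build an RDMA READ REQ work request.  Only a single SGE is supported
 * for the local buffer.
 */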
static inline int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
                                       u8 *flit_cnt)
{
        if (wr->num_sge > 1)
                return -EINVAL;
        wqe->read.rdmaop = T3_READ_REQ;
        wqe->read.reserved[0] = 0;
        wqe->read.reserved[1] = 0;
        wqe->read.reserved[2] = 0;
        wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
        wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
        wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
        wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
        wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
        *flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
        return 0;
}

/*
 * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
 */
static inline int iwch_sgl2pbl_map(struct iwch_dev *rhp,
                                   struct ib_sge *sg_list, u32 num_sgle,
                                   u32 *pbl_addr, u8 *page_size)
{
        int i;
        struct iwch_mr *mhp;
        u32 offset;
        for (i = 0; i < num_sgle; i++) {

                mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
                if (!mhp) {
                        PDBG("%s %d\n", __FUNCTION__, __LINE__);
                        return -EIO;
                }
                if (!mhp->attr.state) {
                        PDBG("%s %d\n", __FUNCTION__, __LINE__);
                        return -EIO;
                }
                if (mhp->attr.zbva) {
                        PDBG("%s %d\n", __FUNCTION__, __LINE__);
                        return -EIO;
                }

                if (sg_list[i].addr < mhp->attr.va_fbo) {
                        PDBG("%s %d\n", __FUNCTION__, __LINE__);
                        return -EINVAL;
                }
                if (sg_list[i].addr + ((u64) sg_list[i].length) <
                    sg_list[i].addr) {
                        PDBG("%s %d\n", __FUNCTION__, __LINE__);
                        return -EINVAL;
                }
                if (sg_list[i].addr + ((u64) sg_list[i].length) >
                    mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
                        PDBG("%s %d\n", __FUNCTION__, __LINE__);
                        return -EINVAL;
                }
                offset = sg_list[i].addr - mhp->attr.va_fbo;
                offset += ((u32) mhp->attr.va_fbo) %
                          (1UL << (12 + mhp->attr.page_size));
                pbl_addr[i] = ((mhp->attr.pbl_addr -
                                rhp->rdev.rnic_info.pbl_base) >> 3) +
                              (offset >> (12 + mhp->attr.page_size));
                page_size[i] = mhp->attr.page_size;
        }
        return 0;
}

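/*
 * Build a receive work request: map the SGL to PBL addresses via
 * iwch_sgl2pbl_map(), fill in the per-SGE page sizes, stags, lengths,
 * page offsets and PBL addresses, and zero any unused SGL slots.
 */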
static inline int iwch_build_rdma_recv(struct iwch_dev *rhp,
                                       union t3_wr *wqe,
                                       struct ib_recv_wr *wr)
{
        int i, err = 0;
        u32 pbl_addr[4];
        u8 page_size[4];
        if (wr->num_sge > T3_MAX_SGE)
                return -EINVAL;
        err = iwch_sgl2pbl_map(rhp, wr->sg_list, wr->num_sge, pbl_addr,
                               page_size);
        if (err)
                return err;
        wqe->recv.pagesz[0] = page_size[0];
        wqe->recv.pagesz[1] = page_size[1];
        wqe->recv.pagesz[2] = page_size[2];
        wqe->recv.pagesz[3] = page_size[3];
        wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
        for (i = 0; i < wr->num_sge; i++) {
                wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
                wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);

                /* 'to' in the WQE is the offset into the page */
                wqe->recv.sgl[i].to = cpu_to_be64(((u32) wr->sg_list[i].addr) %
                                (1UL << (12 + page_size[i])));

                /* pbl_addr is the adapter's address in the PBL */
                wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
        }
        for (; i < T3_MAX_SGE; i++) {
                wqe->recv.sgl[i].stag = 0;
                wqe->recv.sgl[i].len = 0;
                wqe->recv.sgl[i].to = 0;
                wqe->recv.pbl_addr[i] = 0;
        }
        return 0;
}

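/*
 * Post a chain of send work requests to the SQ.  Each WR is built into
 * a WQE, tracked in the software SQ (struct t3_swsq), stamped with the
 * firmware header, and the doorbell is rung once at the end.
 */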
int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr)
{
        int err = 0;
        u8 t3_wr_flit_cnt;
        enum t3_wr_opcode t3_wr_opcode = 0;
        enum t3_wr_flags t3_wr_flags;
        struct iwch_qp *qhp;
        u32 idx;
        union t3_wr *wqe;
        u32 num_wrs;
        unsigned long flag;
        struct t3_swsq *sqp;

        qhp = to_iwch_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
        if (qhp->attr.state > IWCH_QP_STATE_RTS) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
                            qhp->wq.sq_size_log2);
        if (num_wrs == 0) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -ENOMEM;
        }
        while (wr) {
                if (num_wrs == 0) {
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }
                idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
                wqe = (union t3_wr *) (qhp->wq.queue + idx);
                t3_wr_flags = 0;
                if (wr->send_flags & IB_SEND_SOLICITED)
                        t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
                if (wr->send_flags & IB_SEND_FENCE)
                        t3_wr_flags |= T3_READ_FENCE_FLAG;
                if (wr->send_flags & IB_SEND_SIGNALED)
                        t3_wr_flags |= T3_COMPLETION_FLAG;
                sqp = qhp->wq.sq +
                      Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
                switch (wr->opcode) {
                case IB_WR_SEND:
                case IB_WR_SEND_WITH_IMM:
                        t3_wr_opcode = T3_WR_SEND;
                        err = iwch_build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
                        break;
                case IB_WR_RDMA_WRITE:
                case IB_WR_RDMA_WRITE_WITH_IMM:
                        t3_wr_opcode = T3_WR_WRITE;
                        err = iwch_build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
                        break;
                case IB_WR_RDMA_READ:
                        t3_wr_opcode = T3_WR_READ;
                        t3_wr_flags = 0; /* T3 reads are always signaled */
                        err = iwch_build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
                        if (err)
                                break;
                        sqp->read_len = wqe->read.local_len;
                        if (!qhp->wq.oldest_read)
                                qhp->wq.oldest_read = sqp;
                        break;
                default:
                        PDBG("%s post of type=%d TBD!\n", __FUNCTION__,
                             wr->opcode);
                        err = -EINVAL;
                }
                if (err) {
                        *bad_wr = wr;
                        break;
                }
                wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
                sqp->wr_id = wr->wr_id;
                sqp->opcode = wr2opcode(t3_wr_opcode);
                sqp->sq_wptr = qhp->wq.sq_wptr;
                sqp->complete = 0;
                sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);

                build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
                               Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
                               0, t3_wr_flit_cnt);
                PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
                     __FUNCTION__, (unsigned long long) wr->wr_id, idx,
                     Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
                     sqp->opcode);
                wr = wr->next;
                num_wrs--;
                ++(qhp->wq.wptr);
                ++(qhp->wq.sq_wptr);
        }
        spin_unlock_irqrestore(&qhp->lock, flag);
        ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
        return err;
}

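/*
 * Post a chain of receive work requests to the RQ and ring the
 * doorbell.  Fails with -ENOMEM when the RQ is full; *bad_wr points at
 * the first WR that could not be posted.
 */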
int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                      struct ib_recv_wr **bad_wr)
{
        int err = 0;
        struct iwch_qp *qhp;
        u32 idx;
        union t3_wr *wqe;
        u32 num_wrs;
        unsigned long flag;

        qhp = to_iwch_qp(ibqp);
        spin_lock_irqsave(&qhp->lock, flag);
        if (qhp->attr.state > IWCH_QP_STATE_RTS) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
                            qhp->wq.rq_size_log2) - 1;
        if (!wr) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        while (wr) {
                idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
                wqe = (union t3_wr *) (qhp->wq.queue + idx);
                if (num_wrs)
                        err = iwch_build_rdma_recv(qhp->rhp, wqe, wr);
                else
                        err = -ENOMEM;
                if (err) {
                        *bad_wr = wr;
                        break;
                }
                qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, qhp->wq.rq_size_log2)] =
                        wr->wr_id;
                build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
                               Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
                               0, sizeof(struct t3_receive_wr) >> 3);
                PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rq_rptr 0x%x "
                     "wqe %p\n", __FUNCTION__, (unsigned long long) wr->wr_id,
                     idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
                ++(qhp->wq.rq_wptr);
                ++(qhp->wq.wptr);
                wr = wr->next;
                num_wrs--;
        }
        spin_unlock_irqrestore(&qhp->lock, flag);
        ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
        return err;
}

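/*
 * Post a memory window bind work request on the QP's SQ and ring the
 * doorbell.  The MR backing the bind is resolved to a PBL address via
 * iwch_sgl2pbl_map().
 */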
int iwch_bind_mw(struct ib_qp *qp,
                 struct ib_mw *mw,
                 struct ib_mw_bind *mw_bind)
{
        struct iwch_dev *rhp;
        struct iwch_mw *mhp;
        struct iwch_qp *qhp;
        union t3_wr *wqe;
        u32 pbl_addr;
        u8 page_size;
        u32 num_wrs;
        unsigned long flag;
        struct ib_sge sgl;
        int err = 0;
        enum t3_wr_flags t3_wr_flags;
        u32 idx;
        struct t3_swsq *sqp;

        qhp = to_iwch_qp(qp);
        mhp = to_iwch_mw(mw);
        rhp = qhp->rhp;

        spin_lock_irqsave(&qhp->lock, flag);
        if (qhp->attr.state > IWCH_QP_STATE_RTS) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -EINVAL;
        }
        num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
                            qhp->wq.sq_size_log2);
        if (num_wrs == 0) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return -ENOMEM;
        }
        idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
        PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __FUNCTION__, idx,
             mw, mw_bind);
        wqe = (union t3_wr *) (qhp->wq.queue + idx);

        t3_wr_flags = 0;
        if (mw_bind->send_flags & IB_SEND_SIGNALED)
                t3_wr_flags = T3_COMPLETION_FLAG;

        sgl.addr = mw_bind->addr;
        sgl.lkey = mw_bind->mr->lkey;
        sgl.length = mw_bind->length;
        wqe->bind.reserved = 0;
        wqe->bind.type = T3_VA_BASED_TO;

        /* TBD: check perms */
        wqe->bind.perms = iwch_convert_access(mw_bind->mw_access_flags);
        wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
        wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
        wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
        wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
        err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
        if (err) {
                spin_unlock_irqrestore(&qhp->lock, flag);
                return err;
        }
        wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
        sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
        sqp->wr_id = mw_bind->wr_id;
        sqp->opcode = T3_BIND_MW;
        sqp->sq_wptr = qhp->wq.sq_wptr;
        sqp->complete = 0;
        sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
        wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
        wqe->bind.mr_pagesz = page_size;
        wqe->flit[T3_SQ_COOKIE_FLIT] = mw_bind->wr_id;
        build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
                       Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
                       sizeof(struct t3_bind_mw_wr) >> 3);
        ++(qhp->wq.wptr);
        ++(qhp->wq.sq_wptr);
        spin_unlock_irqrestore(&qhp->lock, flag);

        ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);

        return err;
}

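/*
 * Map a T3 (TPT) error code to the iWARP TERMINATE layer/etype and
 * error code.  'tagged' reflects the failing operation: 1 for an RDMA
 * WRITE (DDP tagged buffer), 2 for an RDMA READ (RDMAP), 0 otherwise.
 */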
static inline void build_term_codes(int t3err, u8 *layer_type, u8 *ecode,
                                    int tagged)
{
        switch (t3err) {
        case TPT_ERR_STAG:
                if (tagged == 1) {
                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                        *ecode = DDPT_INV_STAG;
                } else if (tagged == 2) {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                        *ecode = RDMAP_INV_STAG;
                }
                break;
        case TPT_ERR_PDID:
        case TPT_ERR_QPID:
        case TPT_ERR_ACCESS:
                if (tagged == 1) {
                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                        *ecode = DDPT_STAG_NOT_ASSOC;
                } else if (tagged == 2) {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                        *ecode = RDMAP_STAG_NOT_ASSOC;
                }
                break;
        case TPT_ERR_WRAP:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                *ecode = RDMAP_TO_WRAP;
                break;
        case TPT_ERR_BOUND:
                if (tagged == 1) {
                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                        *ecode = DDPT_BASE_BOUNDS;
                } else if (tagged == 2) {
                        *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
                        *ecode = RDMAP_BASE_BOUNDS;
                } else {
                        *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                        *ecode = DDPU_MSG_TOOBIG;
                }
                break;
        case TPT_ERR_INVALIDATE_SHARED_MR:
        case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_CANT_INV_STAG;
                break;
        case TPT_ERR_ECC:
        case TPT_ERR_ECC_PSTAG:
        case TPT_ERR_INTERNAL_ERR:
                *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
                *ecode = 0;
                break;
        case TPT_ERR_OUT_OF_RQE:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MSN_NOBUF;
                break;
        case TPT_ERR_PBL_ADDR_BOUND:
                *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                *ecode = DDPT_BASE_BOUNDS;
                break;
        case TPT_ERR_CRC:
                *layer_type = LAYER_MPA|DDP_LLP;
                *ecode = MPA_CRC_ERR;
                break;
        case TPT_ERR_MARKER:
                *layer_type = LAYER_MPA|DDP_LLP;
                *ecode = MPA_MARKER_ERR;
                break;
        case TPT_ERR_PDU_LEN_ERR:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_MSG_TOOBIG;
                break;
        case TPT_ERR_DDP_VERSION:
                if (tagged) {
                        *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
                        *ecode = DDPT_INV_VERS;
                } else {
                        *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                        *ecode = DDPU_INV_VERS;
                }
                break;
        case TPT_ERR_RDMA_VERSION:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_INV_VERS;
                break;
        case TPT_ERR_OPCODE:
                *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
                *ecode = RDMAP_INV_OPCODE;
                break;
        case TPT_ERR_DDP_QUEUE_NUM:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_QN;
                break;
        case TPT_ERR_MSN:
        case TPT_ERR_MSN_GAP:
        case TPT_ERR_MSN_RANGE:
        case TPT_ERR_IRD_OVERFLOW:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MSN_RANGE;
                break;
        case TPT_ERR_TBIT:
                *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
                *ecode = 0;
                break;
        case TPT_ERR_MO:
                *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
                *ecode = DDPU_INV_MO;
                break;
        default:
                *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
                *ecode = 0;
                break;
        }
}

/*
 * This posts a TERMINATE with layer=RDMA, type=catastrophic.
 */
int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
{
        union t3_wr *wqe;
        struct terminate_message *term;
        int status;
        int tagged = 0;
        struct sk_buff *skb;

        PDBG("%s %d\n", __FUNCTION__, __LINE__);
        skb = alloc_skb(40, GFP_ATOMIC);
        if (!skb) {
                printk(KERN_ERR "%s cannot send TERMINATE!\n", __FUNCTION__);
                return -ENOMEM;
        }
        wqe = (union t3_wr *)skb_put(skb, 40);
        memset(wqe, 0, 40);
        wqe->send.rdmaop = T3_TERMINATE;

        /* immediate data length */
        wqe->send.plen = htonl(4);

        /* immediate data starts here. */
        term = (struct terminate_message *)wqe->send.sgl;
        if (rsp_msg) {
                status = CQE_STATUS(rsp_msg->cqe);
                if (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)
                        tagged = 1;
                if ((CQE_OPCODE(rsp_msg->cqe) == T3_READ_REQ) ||
                    (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP))
                        tagged = 2;
        } else {
                status = TPT_ERR_INTERNAL_ERR;
        }
        build_term_codes(status, &term->layer_etype, &term->ecode, tagged);
        build_fw_riwrh((void *)wqe, T3_WR_SEND,
                       T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 1,
                       qhp->ep->hwtid, 5);
        skb->priority = CPL_PRIORITY_DATA;
        return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
}

/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
        struct iwch_cq *rchp, *schp;
        int count;

        rchp = get_chp(qhp->rhp, qhp->attr.rcq);
        schp = get_chp(qhp->rhp, qhp->attr.scq);

        PDBG("%s qhp %p rchp %p schp %p\n", __FUNCTION__, qhp, rchp, schp);
        /* take a ref on the qhp since we must release the lock */
        atomic_inc(&qhp->refcnt);
        spin_unlock_irqrestore(&qhp->lock, *flag);

        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&rchp->lock, *flag);
        spin_lock(&qhp->lock);
        cxio_flush_hw_cq(&rchp->cq);
        cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
        cxio_flush_rq(&qhp->wq, &rchp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&rchp->lock, *flag);

        /* locking hierarchy: cq lock first, then qp lock. */
        spin_lock_irqsave(&schp->lock, *flag);
        spin_lock(&qhp->lock);
        cxio_flush_hw_cq(&schp->cq);
        cxio_count_scqes(&schp->cq, &qhp->wq, &count);
        cxio_flush_sq(&qhp->wq, &schp->cq, count);
        spin_unlock(&qhp->lock);
        spin_unlock_irqrestore(&schp->lock, *flag);

        /* deref */
        if (atomic_dec_and_test(&qhp->refcnt))
                wake_up(&qhp->wait);

        spin_lock_irqsave(&qhp->lock, *flag);
}

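/*
 * For T3B devices, just mark the WQ in error; otherwise do a software
 * flush of the SQ and RQ via __flush_qp().
 */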
static inline void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
{
        if (t3b_device(qhp->rhp))
                cxio_set_wq_in_error(&qhp->wq);
        else
                __flush_qp(qhp, flag);
}

/*
 * Return non-zero if at least one RECV was pre-posted.
 */
static inline int rqes_posted(struct iwch_qp *qhp)
{
        return fw_riwrh_opcode((struct fw_riwrh *)qhp->wq.queue) == T3_WR_RCV;
}

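/*
 * Build a t3_rdma_init_attr block from the QP and endpoint attributes
 * and hand it to the device via cxio_rdma_init().
 */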
static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
                     enum iwch_qp_attr_mask mask,
                     struct iwch_qp_attributes *attrs)
{
        struct t3_rdma_init_attr init_attr;
        int ret;

        init_attr.tid = qhp->ep->hwtid;
        init_attr.qpid = qhp->wq.qpid;
        init_attr.pdid = qhp->attr.pd;
        init_attr.scqid = qhp->attr.scq;
        init_attr.rcqid = qhp->attr.rcq;
        init_attr.rq_addr = qhp->wq.rq_addr;
        init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
        init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
                qhp->attr.mpa_attr.recv_marker_enabled |
                (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
                (qhp->attr.mpa_attr.crc_enabled << 2);

        /*
         * XXX - The IWCM doesn't quite handle getting these
         * attrs set before going into RTS.  For now, just turn
         * them on always...
         */
#if 0
        init_attr.qpcaps = qhp->attr.enableRdmaRead |
                (qhp->attr.enableRdmaWrite << 1) |
                (qhp->attr.enableBind << 2) |
                (qhp->attr.enable_stag0_fastreg << 3) |
                (qhp->attr.enable_stag0_fastreg << 4);
#else
        init_attr.qpcaps = 0x1f;
#endif
        init_attr.tcp_emss = qhp->ep->emss;
        init_attr.ord = qhp->attr.max_ord;
        init_attr.ird = qhp->attr.max_ird;
        init_attr.qp_dma_addr = qhp->wq.dma_addr;
        init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
        init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
        PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
             "flags 0x%x qpcaps 0x%x\n", __FUNCTION__,
             init_attr.rq_addr, init_attr.rq_size,
             init_attr.flags, init_attr.qpcaps);
        ret = cxio_rdma_init(&rhp->rdev, &init_attr);
        PDBG("%s ret %d\n", __FUNCTION__, ret);
        return ret;
}

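/*
 * QP state transition machine.  Applies attribute changes (IDLE only)
 * and moves the QP between IDLE, RTS, CLOSING, TERMINATE and ERROR,
 * initiating a disconnect, a TERMINATE message, or a flush as needed.
 * 'internal' marks transitions requested by the driver itself rather
 * than the user.
 */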
int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
                   enum iwch_qp_attr_mask mask,
                   struct iwch_qp_attributes *attrs,
                   int internal)
{
        int ret = 0;
        struct iwch_qp_attributes newattr = qhp->attr;
        unsigned long flag;
        int disconnect = 0;
        int terminate = 0;
        int abort = 0;
        int free = 0;
        struct iwch_ep *ep = NULL;

        PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __FUNCTION__,
             qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
             (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

        spin_lock_irqsave(&qhp->lock, flag);

        /* Process attr changes if in IDLE */
        if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
                if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
                        ret = -EIO;
                        goto out;
                }
                if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
                        newattr.enable_rdma_read = attrs->enable_rdma_read;
                if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
                        newattr.enable_rdma_write = attrs->enable_rdma_write;
                if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
                        newattr.enable_bind = attrs->enable_bind;
                if (mask & IWCH_QP_ATTR_MAX_ORD) {
                        if (attrs->max_ord >
                            rhp->attr.max_rdma_read_qp_depth) {
                                ret = -EINVAL;
                                goto out;
                        }
                        newattr.max_ord = attrs->max_ord;
                }
                if (mask & IWCH_QP_ATTR_MAX_IRD) {
                        if (attrs->max_ird >
                            rhp->attr.max_rdma_reads_per_qp) {
                                ret = -EINVAL;
                                goto out;
                        }
                        newattr.max_ird = attrs->max_ird;
                }
                qhp->attr = newattr;
        }

        if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
                goto out;
        if (qhp->attr.state == attrs->next_state)
                goto out;

        switch (qhp->attr.state) {
        case IWCH_QP_STATE_IDLE:
                switch (attrs->next_state) {
                case IWCH_QP_STATE_RTS:
                        if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
                                ret = -EINVAL;
                                goto out;
                        }
                        if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
                                ret = -EINVAL;
                                goto out;
                        }
                        qhp->attr.mpa_attr = attrs->mpa_attr;
                        qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
                        qhp->ep = qhp->attr.llp_stream_handle;
                        qhp->attr.state = IWCH_QP_STATE_RTS;

                        /*
                         * Ref the endpoint here and deref when we
                         * disassociate the endpoint from the QP.  This
                         * happens in CLOSING->IDLE transition or *->ERROR
                         * transition.
                         */
                        get_ep(&qhp->ep->com);
                        spin_unlock_irqrestore(&qhp->lock, flag);
                        ret = rdma_init(rhp, qhp, mask, attrs);
                        spin_lock_irqsave(&qhp->lock, flag);
                        if (ret)
                                goto err;
                        break;
                case IWCH_QP_STATE_ERROR:
                        qhp->attr.state = IWCH_QP_STATE_ERROR;
                        flush_qp(qhp, &flag);
                        break;
                default:
                        ret = -EINVAL;
                        goto out;
                }
                break;
        case IWCH_QP_STATE_RTS:
                switch (attrs->next_state) {
                case IWCH_QP_STATE_CLOSING:
                        BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
                        qhp->attr.state = IWCH_QP_STATE_CLOSING;
                        if (!internal) {
                                abort = 0;
                                disconnect = 1;
                                ep = qhp->ep;
                        }
                        break;
                case IWCH_QP_STATE_TERMINATE:
                        qhp->attr.state = IWCH_QP_STATE_TERMINATE;
                        if (!internal)
                                terminate = 1;
                        break;
                case IWCH_QP_STATE_ERROR:
                        qhp->attr.state = IWCH_QP_STATE_ERROR;
                        if (!internal) {
                                abort = 1;
                                disconnect = 1;
                                ep = qhp->ep;
                        }
                        goto err;
                        break;
                default:
                        ret = -EINVAL;
                        goto out;
                }
                break;
        case IWCH_QP_STATE_CLOSING:
                if (!internal) {
                        ret = -EINVAL;
                        goto out;
                }
                switch (attrs->next_state) {
                        case IWCH_QP_STATE_IDLE:
                                qhp->attr.state = IWCH_QP_STATE_IDLE;
                                qhp->attr.llp_stream_handle = NULL;
                                put_ep(&qhp->ep->com);
                                qhp->ep = NULL;
                                wake_up(&qhp->wait);
                                break;
                        case IWCH_QP_STATE_ERROR:
                                goto err;
                        default:
                                ret = -EINVAL;
                                goto err;
                }
                break;
        case IWCH_QP_STATE_ERROR:
                if (attrs->next_state != IWCH_QP_STATE_IDLE) {
                        ret = -EINVAL;
                        goto out;
                }

                if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
                    !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
                        ret = -EINVAL;
                        goto out;
                }
                qhp->attr.state = IWCH_QP_STATE_IDLE;
                memset(&qhp->attr, 0, sizeof(qhp->attr));
                break;
        case IWCH_QP_STATE_TERMINATE:
                if (!internal) {
                        ret = -EINVAL;
                        goto out;
                }
                goto err;
                break;
        default:
                printk(KERN_ERR "%s in a bad state %d\n",
                       __FUNCTION__, qhp->attr.state);
                ret = -EINVAL;
                goto err;
                break;
        }
        goto out;
err:
        PDBG("%s disassociating ep %p qpid 0x%x\n", __FUNCTION__, qhp->ep,
             qhp->wq.qpid);

        /* disassociate the LLP connection */
        qhp->attr.llp_stream_handle = NULL;
        ep = qhp->ep;
        qhp->ep = NULL;
        qhp->attr.state = IWCH_QP_STATE_ERROR;
        free = 1;
        wake_up(&qhp->wait);
        BUG_ON(!ep);
        flush_qp(qhp, &flag);
out:
        spin_unlock_irqrestore(&qhp->lock, flag);

        if (terminate)
                iwch_post_terminate(qhp, NULL);

        /*
         * If disconnect is 1, then we need to initiate a disconnect
         * on the EP.  This can be a normal close (RTS->CLOSING) or
         * an abnormal close (RTS/CLOSING->ERROR).
         */
        if (disconnect)
                iwch_ep_disconnect(ep, abort, GFP_KERNEL);

        /*
         * If free is 1, then we've disassociated the EP from the QP
         * and we need to dereference the EP.
         */
        if (free)
                put_ep(&ep->com);

        PDBG("%s exit state %d\n", __FUNCTION__, qhp->attr.state);
        return ret;
}

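/* Quiesce the connection TID and mark the QP as quiesced. */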
static int quiesce_qp(struct iwch_qp *qhp)
{
        spin_lock_irq(&qhp->lock);
        iwch_quiesce_tid(qhp->ep);
        qhp->flags |= QP_QUIESCED;
        spin_unlock_irq(&qhp->lock);
        return 0;
}

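/* Resume the connection TID and clear the quiesced flag. */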
static int resume_qp(struct iwch_qp *qhp)
{
        spin_lock_irq(&qhp->lock);
        iwch_resume_tid(qhp->ep);
        qhp->flags &= ~QP_QUIESCED;
        spin_unlock_irq(&qhp->lock);
        return 0;
}

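/* Quiesce every QP that uses the given CQ as its send or receive CQ. */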
int iwch_quiesce_qps(struct iwch_cq *chp)
{
        int i;
        struct iwch_qp *qhp;

        for (i = 0; i < T3_MAX_NUM_QP; i++) {
                qhp = get_qhp(chp->rhp, i);
                if (!qhp)
                        continue;
                if ((qhp->attr.rcq == chp->cq.cqid) && !qp_quiesced(qhp)) {
                        quiesce_qp(qhp);
                        continue;
                }
                if ((qhp->attr.scq == chp->cq.cqid) && !qp_quiesced(qhp))
                        quiesce_qp(qhp);
        }
        return 0;
}

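/* Resume every quiesced QP that uses the given CQ. */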
int iwch_resume_qps(struct iwch_cq *chp)
{
        int i;
        struct iwch_qp *qhp;

        for (i = 0; i < T3_MAX_NUM_QP; i++) {
                qhp = get_qhp(chp->rhp, i);
                if (!qhp)
                        continue;
                if ((qhp->attr.rcq == chp->cq.cqid) && qp_quiesced(qhp)) {
                        resume_qp(qhp);
                        continue;
                }
                if ((qhp->attr.scq == chp->cq.cqid) && qp_quiesced(qhp))
                        resume_qp(qhp);
        }
        return 0;
}