drivers/infiniband/hw/qib/qib_ruc.c
/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/spinlock.h>

#include "qib.h"
#include "qib_mad.h"

/*
 * Convert the AETH RNR timeout code into the number of microseconds.
 */
const u32 ib_qib_rnr_table[32] = {
        655360, /* 00: 655.36 */
        10,     /* 01:    .01 */
        20,     /* 02:    .02 */
        30,     /* 03:    .03 */
        40,     /* 04:    .04 */
        60,     /* 05:    .06 */
        80,     /* 06:    .08 */
        120,    /* 07:    .12 */
        160,    /* 08:    .16 */
        240,    /* 09:    .24 */
        320,    /* 0A:    .32 */
        480,    /* 0B:    .48 */
        640,    /* 0C:    .64 */
        960,    /* 0D:    .96 */
        1280,   /* 0E:   1.28 */
        1920,   /* 0F:   1.92 */
        2560,   /* 10:   2.56 */
        3840,   /* 11:   3.84 */
        5120,   /* 12:   5.12 */
        7680,   /* 13:   7.68 */
        10240,  /* 14:  10.24 */
        15360,  /* 15:  15.36 */
        20480,  /* 16:  20.48 */
        30720,  /* 17:  30.72 */
        40960,  /* 18:  40.96 */
        61440,  /* 19:  61.44 */
        81920,  /* 1A:  81.92 */
        122880, /* 1B: 122.88 */
        163840, /* 1C: 163.84 */
        245760, /* 1D: 245.76 */
        327680, /* 1E: 327.68 */
        491520  /* 1F: 491.52 */
};
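
/*
 * For example, RNR timeout code 0x0D maps to ib_qib_rnr_table[0x0D] ==
 * 960 usecs; the rnr_nak path in qib_ruc_loopback() below indexes this
 * table with qp->r_min_rnr_timer to arm the retry timer.
 */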

/*
 * Validate a RWQE and fill in the SGE state.
 * Return 1 if OK.
 */
static int qib_init_sge(struct qib_qp *qp, struct qib_rwqe *wqe)
{
        int i, j, ret;
        struct ib_wc wc;
        struct qib_lkey_table *rkt;
        struct qib_pd *pd;
        struct qib_sge_state *ss;

        rkt = &to_idev(qp->ibqp.device)->lk_table;
        pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
        ss = &qp->r_sge;
        ss->sg_list = qp->r_sg_list;
        qp->r_len = 0;
        for (i = j = 0; i < wqe->num_sge; i++) {
                if (wqe->sg_list[i].length == 0)
                        continue;
                /* Check LKEY */
                if (!qib_lkey_ok(rkt, pd, j ? &ss->sg_list[j - 1] : &ss->sge,
                                 &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
                        goto bad_lkey;
                qp->r_len += wqe->sg_list[i].length;
                j++;
        }
        ss->num_sge = j;
        ss->total_len = qp->r_len;
        ret = 1;
        goto bail;

bad_lkey:
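        /*
         * Drop the MR references taken above for entries 0..j-1,
         * walking the list in reverse.
         */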
        while (j) {
                struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge;

                atomic_dec(&sge->mr->refcount);
        }
        ss->num_sge = 0;
        memset(&wc, 0, sizeof(wc));
        wc.wr_id = wqe->wr_id;
        wc.status = IB_WC_LOC_PROT_ERR;
        wc.opcode = IB_WC_RECV;
        wc.qp = &qp->ibqp;
        /* Signal solicited completion event. */
        qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        ret = 0;
bail:
        return ret;
}

/**
 * qib_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
 *
 * Return -1 if there is a local error, 0 if no RWQE is available,
 * otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int qib_get_rwqe(struct qib_qp *qp, int wr_id_only)
{
        unsigned long flags;
        struct qib_rq *rq;
        struct qib_rwq *wq;
        struct qib_srq *srq;
        struct qib_rwqe *wqe;
        void (*handler)(struct ib_event *, void *);
        u32 tail;
        int ret;

        if (qp->ibqp.srq) {
                srq = to_isrq(qp->ibqp.srq);
                handler = srq->ibsrq.event_handler;
                rq = &srq->rq;
        } else {
                srq = NULL;
                handler = NULL;
                rq = &qp->r_rq;
        }

        spin_lock_irqsave(&rq->lock, flags);
        if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
                ret = 0;
                goto unlock;
        }

        wq = rq->wq;
        tail = wq->tail;
        /* Validate tail before using it since it is user writable. */
        if (tail >= rq->size)
                tail = 0;
        if (unlikely(tail == wq->head)) {
                ret = 0;
                goto unlock;
        }
        /* Make sure entry is read after head index is read. */
        smp_rmb();
        wqe = get_rwqe_ptr(rq, tail);
        /*
         * Even though we update the tail index in memory, the verbs
         * consumer is not supposed to post more entries until a
         * completion is generated.
         */
        if (++tail >= rq->size)
                tail = 0;
        wq->tail = tail;
        if (!wr_id_only && !qib_init_sge(qp, wqe)) {
                ret = -1;
                goto unlock;
        }
        qp->r_wr_id = wqe->wr_id;

        ret = 1;
        set_bit(QIB_R_WRID_VALID, &qp->r_aflags);
        if (handler) {
                u32 n;

                /*
                 * Validate head pointer value and compute
                 * the number of remaining WQEs.
                 */
                n = wq->head;
                if (n >= rq->size)
                        n = 0;
                if (n < tail)
                        n += rq->size - tail;
                else
                        n -= tail;
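                /*
                 * Example: with rq->size == 8, head == 2 and tail == 6,
                 * n == 2 + (8 - 6) == 4 WQEs remain posted.
                 */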
                if (n < srq->limit) {
                        struct ib_event ev;

                        srq->limit = 0;
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        handler(&ev, srq->ibsrq.srq_context);
                        goto bail;
                }
        }
unlock:
        spin_unlock_irqrestore(&rq->lock, flags);
bail:
        return ret;
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct qib_qp *qp)
{
        struct ib_event ev;

        qp->s_mig_state = IB_MIG_MIGRATED;
        qp->remote_ah_attr = qp->alt_ah_attr;
        qp->port_num = qp->alt_ah_attr.port_num;
        qp->s_pkey_index = qp->s_alt_pkey_index;

        ev.device = qp->ibqp.device;
        ev.element.qp = &qp->ibqp;
        ev.event = IB_EVENT_PATH_MIG;
        qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

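/*
 * Return the source GUID for the given index: index 0 selects the port
 * GUID, higher values select entries from the GUID table.
 */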
static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
        if (!index) {
                struct qib_pportdata *ppd = ppd_from_ibp(ibp);

                return ppd->guid;
        } else
                return ibp->guids[index - 1];
}

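/*
 * A GID matches when the interface IDs are equal and the subnet prefix
 * is either the expected one or the default GID prefix.
 */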
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
        return (gid->global.interface_id == id &&
                (gid->global.subnet_prefix == gid_prefix ||
                 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * Validate the incoming header against the QP's path information.
 * Return 0 if the checks pass, 1 if the packet should be dropped.
 *
 * This should be called with the QP s_lock held.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct qib_ib_header *hdr,
                      int has_grh, struct qib_qp *qp, u32 bth0)
{
        __be64 guid;

        if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
                if (!has_grh) {
                        if (qp->alt_ah_attr.ah_flags & IB_AH_GRH)
                                goto err;
                } else {
                        if (!(qp->alt_ah_attr.ah_flags & IB_AH_GRH))
                                goto err;
                        guid = get_sguid(ibp, qp->alt_ah_attr.grh.sgid_index);
                        if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
                                goto err;
                        if (!gid_ok(&hdr->u.l.grh.sgid,
                            qp->alt_ah_attr.grh.dgid.global.subnet_prefix,
                            qp->alt_ah_attr.grh.dgid.global.interface_id))
                                goto err;
                }
                if (!qib_pkey_ok((u16)bth0,
                                 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
                        qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
                                      (u16)bth0,
                                      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
                                      0, qp->ibqp.qp_num,
                                      hdr->lrh[3], hdr->lrh[1]);
                        goto err;
                }
                /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
                if (be16_to_cpu(hdr->lrh[3]) != qp->alt_ah_attr.dlid ||
                    ppd_from_ibp(ibp)->port != qp->alt_ah_attr.port_num)
                        goto err;
                qib_migrate_qp(qp);
        } else {
                if (!has_grh) {
                        if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
                                goto err;
                } else {
                        if (!(qp->remote_ah_attr.ah_flags & IB_AH_GRH))
                                goto err;
                        guid = get_sguid(ibp,
                                         qp->remote_ah_attr.grh.sgid_index);
                        if (!gid_ok(&hdr->u.l.grh.dgid, ibp->gid_prefix, guid))
                                goto err;
                        if (!gid_ok(&hdr->u.l.grh.sgid,
                            qp->remote_ah_attr.grh.dgid.global.subnet_prefix,
                            qp->remote_ah_attr.grh.dgid.global.interface_id))
                                goto err;
                }
                if (!qib_pkey_ok((u16)bth0,
                                 qib_get_pkey(ibp, qp->s_pkey_index))) {
                        qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
                                      (u16)bth0,
                                      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
                                      0, qp->ibqp.qp_num,
                                      hdr->lrh[3], hdr->lrh[1]);
                        goto err;
                }
                /* Validate the SLID. See Ch. 9.6.1.5 */
                if (be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid ||
                    ppd_from_ibp(ibp)->port != qp->port_num)
                        goto err;
                if (qp->s_mig_state == IB_MIG_REARM &&
                    !(bth0 & IB_BTH_MIG_REQ))
                        qp->s_mig_state = IB_MIG_ARMED;
        }

        return 0;

err:
        return 1;
}

/**
 * qib_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from qib_do_send() to forward a WQE addressed to the
 * same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void qib_ruc_loopback(struct qib_qp *sqp)
{
        struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
        struct qib_qp *qp;
        struct qib_swqe *wqe;
        struct qib_sge *sge;
        unsigned long flags;
        struct ib_wc wc;
        u64 sdata;
        atomic64_t *maddr;
        enum ib_wc_status send_status;
        int release;
        int ret;

        /*
         * Note that we check the responder QP state after
         * checking the requester's state.
         */
        qp = qib_lookup_qpn(ibp, sqp->remote_qpn);

        spin_lock_irqsave(&sqp->s_lock, flags);

        /* Return if we are already busy processing a work request. */
        if ((sqp->s_flags & (QIB_S_BUSY | QIB_S_ANY_WAIT)) ||
            !(ib_qib_state_ops[sqp->state] & QIB_PROCESS_OR_FLUSH_SEND))
                goto unlock;

        sqp->s_flags |= QIB_S_BUSY;

again:
        if (sqp->s_last == sqp->s_head)
                goto clr_busy;
        wqe = get_swqe_ptr(sqp, sqp->s_last);

        /* Return if it is not OK to start a new work request. */
        if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
                if (!(ib_qib_state_ops[sqp->state] & QIB_FLUSH_SEND))
                        goto clr_busy;
                /* We are in the error state, flush the work request. */
                send_status = IB_WC_WR_FLUSH_ERR;
                goto flush_send;
        }

        /*
         * We can rely on the entry not changing without the s_lock
         * being held until we update s_last.
         * We increment s_cur to indicate s_last is in progress.
         */
        if (sqp->s_last == sqp->s_cur) {
                if (++sqp->s_cur >= sqp->s_size)
                        sqp->s_cur = 0;
        }
        spin_unlock_irqrestore(&sqp->s_lock, flags);

        if (!qp || !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) ||
            qp->ibqp.qp_type != sqp->ibqp.qp_type) {
                ibp->n_pkt_drops++;
                /*
                 * For RC, the requester would timeout and retry so
                 * shortcut the timeouts and just signal too many retries.
                 */
                if (sqp->ibqp.qp_type == IB_QPT_RC)
                        send_status = IB_WC_RETRY_EXC_ERR;
                else
                        send_status = IB_WC_SUCCESS;
                goto serr;
        }

        memset(&wc, 0, sizeof wc);
        send_status = IB_WC_SUCCESS;

        release = 1;
        sqp->s_sge.sge = wqe->sg_list[0];
        sqp->s_sge.sg_list = wqe->sg_list + 1;
        sqp->s_sge.num_sge = wqe->wr.num_sge;
        sqp->s_len = wqe->length;
        switch (wqe->wr.opcode) {
        case IB_WR_SEND_WITH_IMM:
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = wqe->wr.ex.imm_data;
                /* FALLTHROUGH */
        case IB_WR_SEND:
                ret = qib_get_rwqe(qp, 0);
                if (ret < 0)
                        goto op_err;
                if (!ret)
                        goto rnr_nak;
                break;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = wqe->wr.ex.imm_data;
                ret = qib_get_rwqe(qp, 1);
                if (ret < 0)
                        goto op_err;
                if (!ret)
                        goto rnr_nak;
                /* FALLTHROUGH */
        case IB_WR_RDMA_WRITE:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
                if (wqe->length == 0)
                        break;
                if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
                                          wqe->wr.wr.rdma.remote_addr,
                                          wqe->wr.wr.rdma.rkey,
                                          IB_ACCESS_REMOTE_WRITE)))
                        goto acc_err;
                qp->r_sge.sg_list = NULL;
                qp->r_sge.num_sge = 1;
                qp->r_sge.total_len = wqe->length;
                break;

        case IB_WR_RDMA_READ:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
                        goto inv_err;
                if (unlikely(!qib_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
                                          wqe->wr.wr.rdma.remote_addr,
                                          wqe->wr.wr.rdma.rkey,
                                          IB_ACCESS_REMOTE_READ)))
                        goto acc_err;
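                /*
                 * Roles reverse for a read: s_sge walks the responder's
                 * memory (reference taken by qib_rkey_ok() above) while
                 * r_sge scatters into the requester's own sg_list, whose
                 * MR references are dropped by qib_send_complete().
                 * Clearing release keeps qib_copy_sge() from dropping
                 * them a second time.
                 */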
                release = 0;
                sqp->s_sge.sg_list = NULL;
                sqp->s_sge.num_sge = 1;
                qp->r_sge.sge = wqe->sg_list[0];
                qp->r_sge.sg_list = wqe->sg_list + 1;
                qp->r_sge.num_sge = wqe->wr.num_sge;
                qp->r_sge.total_len = wqe->length;
                break;

        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
                        goto inv_err;
                if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
                                          wqe->wr.wr.atomic.remote_addr,
                                          wqe->wr.wr.atomic.rkey,
                                          IB_ACCESS_REMOTE_ATOMIC)))
                        goto acc_err;
                /* Perform atomic OP and save result. */
                maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
                sdata = wqe->wr.wr.atomic.compare_add;
                *(u64 *) sqp->s_sge.sge.vaddr =
                        (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
                        (u64) atomic64_add_return(sdata, maddr) - sdata :
                        (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
                                      sdata, wqe->wr.wr.atomic.swap);
                atomic_dec(&qp->r_sge.sge.mr->refcount);
                qp->r_sge.num_sge = 0;
                goto send_comp;

        default:
                send_status = IB_WC_LOC_QP_OP_ERR;
                goto serr;
        }

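        /*
         * Copy the payload, advancing both SGE lists and stepping
         * through each MR's segment map as entries are consumed.
         */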
        sge = &sqp->s_sge.sge;
        while (sqp->s_len) {
                u32 len = sqp->s_len;

                if (len > sge->length)
                        len = sge->length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (!release)
                                atomic_dec(&sge->mr->refcount);
                        if (--sqp->s_sge.num_sge)
                                *sge = *sqp->s_sge.sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= QIB_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                sqp->s_len -= len;
        }
        if (release)
                while (qp->r_sge.num_sge) {
                        atomic_dec(&qp->r_sge.sge.mr->refcount);
                        if (--qp->r_sge.num_sge)
                                qp->r_sge.sge = *qp->r_sge.sg_list++;
                }

        if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
                goto send_comp;

        if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
                wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
        else
                wc.opcode = IB_WC_RECV;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.byte_len = wqe->length;
        wc.qp = &qp->ibqp;
        wc.src_qp = qp->remote_qpn;
        wc.slid = qp->remote_ah_attr.dlid;
        wc.sl = qp->remote_ah_attr.sl;
        wc.port_num = 1;
        /* Signal completion event if the solicited bit is set. */
        qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
        spin_lock_irqsave(&sqp->s_lock, flags);
        ibp->n_loop_pkts++;
flush_send:
        sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
        qib_send_complete(sqp, wqe, send_status);
        goto again;

rnr_nak:
        /* Handle RNR NAK */
        if (qp->ibqp.qp_type == IB_QPT_UC)
                goto send_comp;
        ibp->n_rnr_naks++;
        /*
         * Note: we don't need the s_lock held since the BUSY flag
         * makes this single threaded.
         */
        if (sqp->s_rnr_retry == 0) {
                send_status = IB_WC_RNR_RETRY_EXC_ERR;
                goto serr;
        }
        if (sqp->s_rnr_retry_cnt < 7)
                sqp->s_rnr_retry--;
        spin_lock_irqsave(&sqp->s_lock, flags);
        if (!(ib_qib_state_ops[sqp->state] & QIB_PROCESS_RECV_OK))
                goto clr_busy;
        sqp->s_flags |= QIB_S_WAIT_RNR;
        sqp->s_timer.function = qib_rc_rnr_retry;
        sqp->s_timer.expires = jiffies +
                usecs_to_jiffies(ib_qib_rnr_table[qp->r_min_rnr_timer]);
        add_timer(&sqp->s_timer);
        goto clr_busy;

op_err:
        send_status = IB_WC_REM_OP_ERR;
        wc.status = IB_WC_LOC_QP_OP_ERR;
        goto err;

inv_err:
        send_status = IB_WC_REM_INV_REQ_ERR;
        wc.status = IB_WC_LOC_QP_OP_ERR;
        goto err;

acc_err:
        send_status = IB_WC_REM_ACCESS_ERR;
        wc.status = IB_WC_LOC_PROT_ERR;
err:
        /* responder goes to error state */
        qib_rc_error(qp, wc.status);

serr:
        spin_lock_irqsave(&sqp->s_lock, flags);
        qib_send_complete(sqp, wqe, send_status);
        if (sqp->ibqp.qp_type == IB_QPT_RC) {
                int lastwqe = qib_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

                sqp->s_flags &= ~QIB_S_BUSY;
                spin_unlock_irqrestore(&sqp->s_lock, flags);
                if (lastwqe) {
                        struct ib_event ev;

                        ev.device = sqp->ibqp.device;
                        ev.element.qp = &sqp->ibqp;
                        ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
                }
                goto done;
        }
clr_busy:
        sqp->s_flags &= ~QIB_S_BUSY;
unlock:
        spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
        if (qp && atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
}

/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
                 struct ib_global_route *grh, u32 hwords, u32 nwords)
{
        hdr->version_tclass_flow =
                cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
                            (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
                            (grh->flow_label << IB_GRH_FLOW_SHIFT));
        hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
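        /*
         * PayLen counts everything after the GRH in bytes: the remaining
         * header words (hwords less the two LRH words), the payload and
         * the ICRC, converted from 32-bit words to bytes.
         */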
        /* next_hdr is defined by C8-7 in ch. 8.4.1 */
        hdr->next_hdr = IB_GRH_NEXT_HDR;
        hdr->hop_limit = grh->hop_limit;
        /* The SGID is 32-bit aligned. */
        hdr->sgid.global.subnet_prefix = ibp->gid_prefix;
        hdr->sgid.global.interface_id = grh->sgid_index ?
                ibp->guids[grh->sgid_index - 1] : ppd_from_ibp(ibp)->guid;
        hdr->dgid = grh->dgid;

        /* GRH header size in 32-bit words. */
        return sizeof(struct ib_grh) / sizeof(u32);
}

void qib_make_ruc_header(struct qib_qp *qp, struct qib_other_headers *ohdr,
                         u32 bth0, u32 bth2)
{
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        u16 lrh0;
        u32 nwords;
        u32 extra_bytes;

        /* Construct the header. */
        extra_bytes = -qp->s_cur_size & 3;
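        /* -size & 3 yields the 0-3 pad bytes that round the payload up
         * to a 4-byte boundary. */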
        nwords = (qp->s_cur_size + extra_bytes) >> 2;
        lrh0 = QIB_LRH_BTH;
        if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
                qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
                                               &qp->remote_ah_attr.grh,
                                               qp->s_hdrwords, nwords);
                lrh0 = QIB_LRH_GRH;
        }
        lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
                qp->remote_ah_attr.sl << 4;
        qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
        qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
        qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
        qp->s_hdr.lrh[3] = cpu_to_be16(ppd_from_ibp(ibp)->lid |
                                       qp->remote_ah_attr.src_path_bits);
        bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
        bth0 |= extra_bytes << 20;
        if (qp->s_mig_state == IB_MIG_MIGRATED)
                bth0 |= IB_BTH_MIG_REQ;
        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
        ohdr->bth[2] = cpu_to_be32(bth2);
}

/**
 * qib_do_send - perform a send on a QP
 * @work: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct work_struct *work)
{
        struct qib_qp *qp = container_of(work, struct qib_qp, s_work);
        struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
        struct qib_pportdata *ppd = ppd_from_ibp(ibp);
        int (*make_req)(struct qib_qp *qp);
        unsigned long flags;

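        /*
         * Masking off the low lmc bits compares base LIDs, so any of
         * the 2^lmc path-bit variants of our own LID is handled as
         * loopback.
         */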
        if ((qp->ibqp.qp_type == IB_QPT_RC ||
             qp->ibqp.qp_type == IB_QPT_UC) &&
            (qp->remote_ah_attr.dlid & ~((1 << ppd->lmc) - 1)) == ppd->lid) {
                qib_ruc_loopback(qp);
                return;
        }

        if (qp->ibqp.qp_type == IB_QPT_RC)
                make_req = qib_make_rc_req;
        else if (qp->ibqp.qp_type == IB_QPT_UC)
                make_req = qib_make_uc_req;
        else
                make_req = qib_make_ud_req;

        spin_lock_irqsave(&qp->s_lock, flags);

        /* Return if we are already busy processing a work request. */
        if (!qib_send_ok(qp)) {
                spin_unlock_irqrestore(&qp->s_lock, flags);
                return;
        }

        qp->s_flags |= QIB_S_BUSY;

        spin_unlock_irqrestore(&qp->s_lock, flags);

        do {
                /* Check for a constructed packet to be sent. */
                if (qp->s_hdrwords != 0) {
                        /*
                         * If the packet cannot be sent now, return and
                         * the send tasklet will be woken up later.
                         */
                        if (qib_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
                                           qp->s_cur_sge, qp->s_cur_size))
                                break;
                        /* Record that s_hdr is empty. */
                        qp->s_hdrwords = 0;
                }
        } while (make_req(qp));
}

/*
 * This should be called with s_lock held.
 */
void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
                       enum ib_wc_status status)
{
        u32 old_last, last;
        unsigned i;

        if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
                return;

        for (i = 0; i < wqe->wr.num_sge; i++) {
                struct qib_sge *sge = &wqe->sg_list[i];

                atomic_dec(&sge->mr->refcount);
        }
        if (qp->ibqp.qp_type == IB_QPT_UD ||
            qp->ibqp.qp_type == IB_QPT_SMI ||
            qp->ibqp.qp_type == IB_QPT_GSI)
                atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);

        /* See ch. 11.2.4.1 and 10.7.3.1 */
        if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
            (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
            status != IB_WC_SUCCESS) {
                struct ib_wc wc;

                memset(&wc, 0, sizeof wc);
                wc.wr_id = wqe->wr.wr_id;
                wc.status = status;
                wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
                wc.qp = &qp->ibqp;
                if (status == IB_WC_SUCCESS)
                        wc.byte_len = wqe->length;
                qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
                             status != IB_WC_SUCCESS);
        }

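        /*
         * Advance s_last past the completed WQE and drag along any
         * other index still parked on it.
         */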
        last = qp->s_last;
        old_last = last;
        if (++last >= qp->s_size)
                last = 0;
        qp->s_last = last;
        if (qp->s_acked == old_last)
                qp->s_acked = last;
        if (qp->s_cur == old_last)
                qp->s_cur = last;
        if (qp->s_tail == old_last)
                qp->s_tail = last;
        if (qp->state == IB_QPS_SQD && last == qp->s_cur)
                qp->s_draining = 0;
}