/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"
/**
 * qib_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from qib_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling qib_ud_rcv()
 * while this is being called.
 */
static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
{
	struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
	struct qib_pportdata *ppd;
	struct qib_qp *qp;
	struct ib_ah_attr *ah_attr;
	unsigned long flags;
	struct qib_sge_state ssge;
	struct qib_sge *sge;
	struct ib_wc wc;
	u32 length;
	enum ib_qp_type sqptype, dqptype;

	qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
	if (!qp) {
		ibp->n_pkt_drops++;
		return;
	}
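	/*
	 * qib_lookup_qpn() returns the QP with a reference held; it is
	 * released at the end of this function.
	 */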
	sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : sqp->ibqp.qp_type;
	dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
			IB_QPT_UD : qp->ibqp.qp_type;

	if (dqptype != sqptype ||
	    !(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->n_pkt_drops++;
		goto drop;
	}
	ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
	ppd = ppd_from_ibp(ibp);

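	/*
	 * Note: qib_pkey_ok() treats the keys as matching when the low 15
	 * bits are non-zero and equal and at least one key has the
	 * full-membership bit (bit 15) set.
	 */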
	if (qp->ibqp.qp_num > 1) {
		u16 pkey1;
		u16 pkey2;
		u16 lid;

		pkey1 = qib_get_pkey(ibp, sqp->s_pkey_index);
		pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
		if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
			lid = ppd->lid | (ah_attr->src_path_bits &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY, pkey1,
				      ah_attr->sl,
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      cpu_to_be16(lid),
				      cpu_to_be16(ah_attr->dlid));
			goto drop;
		}
	}
	/*
	 * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	if (qp->ibqp.qp_num) {
		u32 qkey;

		qkey = (int)swqe->wr.wr.ud.remote_qkey < 0 ?
			sqp->qkey : swqe->wr.wr.ud.remote_qkey;
		if (unlikely(qkey != qp->qkey)) {
			u16 lid;

			lid = ppd->lid | (ah_attr->src_path_bits &
					  ((1 << ppd->lmc) - 1));
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				      ah_attr->sl,
				      sqp->ibqp.qp_num, qp->ibqp.qp_num,
				      cpu_to_be16(lid),
				      cpu_to_be16(ah_attr->dlid));
			goto drop;
		}
	}
	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	length = swqe->length;
	memset(&wc, 0, sizeof wc);
	wc.byte_len = length + sizeof(struct ib_grh);

	if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.ex.imm_data = swqe->wr.ex.imm_data;
	}
	spin_lock_irqsave(&qp->r_lock, flags);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
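	/*
	 * QIB_R_REUSE_SGE is set when a previous packet was dropped after
	 * its RWQE had already been consumed, so the entry can be reused
	 * here instead of fetching a new one.
	 */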
	if (qp->r_flags & QIB_R_REUSE_SGE)
		qp->r_flags &= ~QIB_R_REUSE_SGE;
	else {
		int ret;

		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto bail_unlock;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->n_vl15_dropped++;
			goto bail_unlock;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= QIB_R_REUSE_SGE;
		ibp->n_pkt_drops++;
		goto bail_unlock;
	}
	if (ah_attr->ah_flags & IB_AH_GRH) {
		qib_copy_sge(&qp->r_sge, &ah_attr->grh,
			     sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	ssge.sg_list = swqe->sg_list + 1;
	ssge.sge = *swqe->sg_list;
	ssge.num_sge = swqe->wr.num_sge;
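	/*
	 * Copy the payload SGE by SGE.  In the loop below, lkey == 0 marks
	 * the reserved DMA memory region (one flat segment), so only user
	 * MRs walk the mr->map[] segment arrays.
	 */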
	sge = &ssge.sge;
	while (length) {
		u32 len = sge->length;

		if (len > length)
			len = length;
		if (len > sge->sge_length)
			len = sge->sge_length;
		BUG_ON(len == 0);
		qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--ssge.num_sge)
				*sge = *ssge.sg_list++;
		} else if (sge->length == 0 && sge->mr->lkey) {
			if (++sge->n >= QIB_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		length -= len;
	}
	while (qp->r_sge.num_sge) {
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		if (--qp->r_sge.num_sge)
			qp->r_sge.sge = *qp->r_sge.sg_list++;
	}
	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		goto bail_unlock;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.qp = &qp->ibqp;
	wc.src_qp = sqp->ibqp.qp_num;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		swqe->wr.wr.ud.pkey_index : 0;
	wc.slid = ppd->lid | (ah_attr->src_path_bits & ((1 << ppd->lmc) - 1));
	wc.sl = ah_attr->sl;
	wc.dlid_path_bits = ah_attr->dlid & ((1 << ppd->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     swqe->wr.send_flags & IB_SEND_SOLICITED);
	ibp->n_loop_pkts++;
bail_unlock:
	spin_unlock_irqrestore(&qp->r_lock, flags);
drop:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
/**
 * qib_make_ud_req - construct a UD request packet
 * @qp: the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int qib_make_ud_req(struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	struct ib_ah_attr *ah_attr;
	struct qib_pportdata *ppd;
	struct qib_ibport *ibp;
	struct qib_swqe *wqe;
	unsigned long flags;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u32 lrh0;
	u16 lid;
	int ret = 0;
	int next_cur;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) {
		if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
			goto bail;
		/* We are in the error state, flush the work request. */
		if (qp->s_last == qp->s_head)
			goto bail;
		/* If DMAs are in progress, we can't flush immediately. */
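		/*
		 * QIB_S_WAIT_DMA makes the SDMA completion path reschedule
		 * this QP, so the flush resumes in order once the
		 * in-flight descriptors have finished.
		 */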
		if (atomic_read(&qp->s_dma_busy)) {
			qp->s_flags |= QIB_S_WAIT_DMA;
			goto bail;
		}
		wqe = get_swqe_ptr(qp, qp->s_last);
		qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
		goto done;
	}
	if (qp->s_cur == qp->s_head)
		goto bail;

	wqe = get_swqe_ptr(qp, qp->s_cur);
	next_cur = qp->s_cur + 1;
	if (next_cur >= qp->s_size)
		next_cur = 0;
	/* Construct the header. */
	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
	if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE) {
		if (ah_attr->dlid != QIB_PERMISSIVE_LID)
			ibp->n_multicast_xmit++;
		else
			ibp->n_unicast_xmit++;
	} else {
		ibp->n_unicast_xmit++;
		lid = ah_attr->dlid & ~((1 << ppd->lmc) - 1);
		if (unlikely(lid == ppd->lid)) {
			/*
			 * If DMAs are in progress, we can't generate
			 * a completion for the loopback packet since
			 * it would be out of order.
			 * XXX Instead of waiting, we could queue a
			 * zero length descriptor so we get a callback.
			 */
			if (atomic_read(&qp->s_dma_busy)) {
				qp->s_flags |= QIB_S_WAIT_DMA;
				goto bail;
			}
			qp->s_cur = next_cur;
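			/*
			 * Note: s_lock is dropped across the loopback call
			 * because qib_ud_loopback() takes the destination
			 * QP's r_lock; holding both could deadlock if two
			 * local QPs send to each other.
			 */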
			spin_unlock_irqrestore(&qp->s_lock, flags);
			qib_ud_loopback(qp, wqe);
			spin_lock_irqsave(&qp->s_lock, flags);
			qib_send_complete(qp, wqe, IB_WC_SUCCESS);
			goto done;
		}
	}
	qp->s_cur = next_cur;
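	/*
	 * -length & 3 is the number of pad bytes (0..3) needed to round
	 * the payload up to a 4-byte boundary; it is advertised in the
	 * BTH PadCnt field below.
	 */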
	extra_bytes = -wqe->length & 3;
	nwords = (wqe->length + extra_bytes) >> 2;

	/* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
	qp->s_hdrwords = 7;
	qp->s_cur_size = wqe->length;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_srate = ah_attr->static_rate;
	qp->s_wqe = wqe;
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	qp->s_sge.total_len = wqe->length;
	if (ah_attr->ah_flags & IB_AH_GRH) {
		/* Header size in 32-bit words. */
		qp->s_hdrwords += qib_make_grh(ibp, &qp->s_hdr.u.l.grh,
					       &ah_attr->grh,
					       qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
		ohdr = &qp->s_hdr.u.l.oth;
		/*
		 * Don't worry about sending to locally attached multicast
		 * QPs.  The spec leaves what happens unspecified.
		 */
	} else {
		/* Header size in 32-bit words. */
		lrh0 = QIB_LRH_BTH;
		ohdr = &qp->s_hdr.u.oth;
	}
	if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
		qp->s_hdrwords++;
		ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
		bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
	} else
		bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
	lrh0 |= ah_attr->sl << 4;
	if (qp->ibqp.qp_type == IB_QPT_SMI)
		lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
	else
		lrh0 |= ibp->sl_to_vl[ah_attr->sl] << 12;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	lid = ppd->lid;
	if (lid) {
		lid |= ah_attr->src_path_bits & ((1 << ppd->lmc) - 1);
		qp->s_hdr.lrh[3] = cpu_to_be16(lid);
	} else
		qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
	if (wqe->wr.send_flags & IB_SEND_SOLICITED)
		bth0 |= IB_BTH_SOLICITED;
	bth0 |= extra_bytes << 20;
	bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? QIB_DEFAULT_P_KEY :
		qib_get_pkey(ibp, qp->ibqp.qp_type == IB_QPT_GSI ?
			     wqe->wr.wr.ud.pkey_index : qp->s_pkey_index);
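	/*
	 * bth0 now packs the opcode in bits 31:24, the SE bit, the pad
	 * count in bits 21:20 and the P_Key in bits 15:0.
	 */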
	ohdr->bth[0] = cpu_to_be32(bth0);
	/*
	 * Use the multicast QP if the destination LID is a multicast LID.
	 */
	ohdr->bth[1] = ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
		ah_attr->dlid != QIB_PERMISSIVE_LID ?
		cpu_to_be32(QIB_MULTICAST_QPN) :
		cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & QIB_PSN_MASK);
	/*
	 * Qkeys with the high order bit set mean use the
	 * qkey from the QP context instead of the WR (see 10.2.5).
	 */
	ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
					 qp->qkey : wqe->wr.wr.ud.remote_qkey);
	ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
done:
	ret = 1;
	goto unlock;

bail:
	qp->s_flags &= ~QIB_S_BUSY;
unlock:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}
static unsigned qib_lookup_pkey(struct qib_ibport *ibp, u16 pkey)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned i;

	pkey &= 0x7fff;	/* remove limited/full membership bit */

	for (i = 0; i < ARRAY_SIZE(dd->rcd[ctxt]->pkeys); ++i)
		if ((dd->rcd[ctxt]->pkeys[i] & 0x7fff) == pkey)
			return i;

	/*
	 * Should not get here, this means hardware failed to validate pkeys.
	 * Punt and return index 0.
	 */
	return 0;
}
/**
 * qib_ud_rcv - receive an incoming UD packet
 * @ibp: the port the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from qib_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
		int has_grh, void *data, u32 tlen, struct qib_qp *qp)
{
	struct qib_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 pad;
	struct ib_wc wc;
	u32 qkey;
	u32 src_qp;
	u16 dlid;

	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12 + 8;		/* LRH + BTH + DETH */
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12 + 8;	/* LRH + GRH + BTH + DETH */
	}
	qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
	src_qp = be32_to_cpu(ohdr->u.ud.deth[1]) & QIB_QPN_MASK;
	/*
	 * Get the number of bytes the message was padded by
	 * and drop incomplete packets.
	 */
	pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
	if (unlikely(tlen < (hdrsize + pad + 4)))
		goto drop;
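	/* The trailing 4 bytes are the ICRC, which is not payload. */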
	tlen -= hdrsize + pad + 4;
	/*
	 * Check that the permissive LID is only used on QP0
	 * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
	 */
	if (qp->ibqp.qp_num) {
		if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
			     hdr->lrh[3] == IB_LID_PERMISSIVE))
			goto drop;
		if (qp->ibqp.qp_num > 1) {
			u16 pkey1, pkey2;

			pkey1 = be32_to_cpu(ohdr->bth[0]);
			pkey2 = qib_get_pkey(ibp, qp->s_pkey_index);
			if (unlikely(!qib_pkey_ok(pkey1, pkey2))) {
				qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_PKEY,
					      pkey1,
					      (be16_to_cpu(hdr->lrh[0]) >> 4) &
						0xF,
					      src_qp, qp->ibqp.qp_num,
					      hdr->lrh[3], hdr->lrh[1]);
				return;
			}
		}
		if (unlikely(qkey != qp->qkey)) {
			qib_bad_pqkey(ibp, IB_NOTICE_TRAP_BAD_QKEY, qkey,
				      (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				      src_qp, qp->ibqp.qp_num,
				      hdr->lrh[3], hdr->lrh[1]);
			return;
		}
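		/*
		 * (lrh[0] >> 12) is the VL field; VL15 is reserved for
		 * subnet management, so a 256-byte GSI MAD arriving on
		 * VL15 is invalid.
		 */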
		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (unlikely(qp->ibqp.qp_num == 1 &&
			     (tlen != 256 ||
			      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))
			goto drop;
	} else {
		struct ib_smp *smp;

		/* Drop invalid MAD packets (see 13.5.3.1). */
		if (tlen != 256 || (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)
			goto drop;
		smp = (struct ib_smp *) data;
		if ((hdr->lrh[1] == IB_LID_PERMISSIVE ||
		     hdr->lrh[3] == IB_LID_PERMISSIVE) &&
		    smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			goto drop;
	}
	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (qp->ibqp.qp_num > 1 &&
	    opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
		wc.ex.imm_data = ohdr->u.ud.imm_data;
		wc.wc_flags = IB_WC_WITH_IMM;
		hdrsize += sizeof(u32);
	} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
		wc.ex.imm_data = 0;
		wc.wc_flags = 0;
	} else
		goto drop;
	/*
	 * A GRH is expected to precede the data even if not
	 * present on the wire.
	 */
	wc.byte_len = tlen + sizeof(struct ib_grh);

	/*
	 * Get the next work request entry to find where to put the data.
	 */
	if (qp->r_flags & QIB_R_REUSE_SGE)
		qp->r_flags &= ~QIB_R_REUSE_SGE;
	else {
		int ret;

		ret = qib_get_rwqe(qp, 0);
		if (ret < 0) {
			qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			return;
		}
		if (!ret) {
			if (qp->ibqp.qp_num == 0)
				ibp->n_vl15_dropped++;
			return;
		}
	}
	/* Silently drop packets which are too big. */
	if (unlikely(wc.byte_len > qp->r_len)) {
		qp->r_flags |= QIB_R_REUSE_SGE;
		goto drop;
	}
	if (has_grh) {
		qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
			     sizeof(struct ib_grh), 1);
		wc.wc_flags |= IB_WC_GRH;
	} else
		qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1);
	qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
	while (qp->r_sge.num_sge) {
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		if (--qp->r_sge.num_sge)
			qp->r_sge.sge = *qp->r_sge.sg_list++;
	}
	if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
		return;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.opcode = IB_WC_RECV;
	wc.vendor_err = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = src_qp;
	wc.pkey_index = qp->ibqp.qp_type == IB_QPT_GSI ?
		qib_lookup_pkey(ibp, be32_to_cpu(ohdr->bth[0])) : 0;
	wc.slid = be16_to_cpu(hdr->lrh[3]);
	wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
	dlid = be16_to_cpu(hdr->lrh[1]);
	/*
	 * Save the LMC lower bits if the destination LID is a unicast LID.
	 */
	wc.dlid_path_bits = dlid >= QIB_MULTICAST_LID_BASE ? 0 :
		dlid & ((1 << ppd_from_ibp(ibp)->lmc) - 1);
	wc.port_num = qp->port_num;
	/* Signal completion event if the solicited bit is set. */
	qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		     (ohdr->bth[0] &
			cpu_to_be32(IB_BTH_SOLICITED)) != 0);
	return;

drop:
	ibp->n_pkt_drops++;
}