2 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
3 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
38 /* cut down ridiculously long IB macro names */
39 #define OP(x) IB_OPCODE_RC_##x
41 static void rc_timeout(unsigned long arg);
43 static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe,
48 len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
49 ss->sge = wqe->sg_list[0];
50 ss->sg_list = wqe->sg_list + 1;
51 ss->num_sge = wqe->wr.num_sge;
52 ss->total_len = wqe->length;
53 qib_skip_sge(ss, len, 0);
54 return wqe->length - len;
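	/*
	 * Worked example of the arithmetic above (illustrative only): with
	 * wqe->psn == 0x100, a restart psn of 0x103 and a 2048-byte path MTU,
	 * three full packets were already acknowledged, so len = 3 * 2048 =
	 * 6144 bytes are skipped and the remaining payload length is returned.
	 */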
57 static void start_timer(struct qib_qp *qp)
59 qp->s_flags |= QIB_S_TIMER;
60 qp->s_timer.function = rc_timeout;
61 /* 4.096 usec. * (1 << qp->timeout) */
62 qp->s_timer.expires = jiffies + qp->timeout_jiffies;
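	/*
	 * Sketch of the timeout arithmetic (qp->timeout_jiffies is assumed to
	 * be precomputed elsewhere from the 5-bit IB timeout exponent, roughly
	 * usecs_to_jiffies(4096UL * (1UL << qp->timeout) / 1000)): e.g. a
	 * timeout exponent of 14 gives about 67 ms before rc_timeout() fires.
	 */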
63 add_timer(&qp->s_timer);
67 * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
68 * @dev: the device for this QP
69 * @qp: a pointer to the QP
70 * @ohdr: a pointer to the IB header being constructed
73 * Return 1 if constructed; otherwise, return 0.
74 * Note that we are on the responder side of the QP context.
75 * Note the QP s_lock must be held.
77 static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp,
78 struct qib_other_headers *ohdr, u32 pmtu)
80 struct qib_ack_entry *e;
86 /* Don't send an ACK if we aren't supposed to. */
87 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
90 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
93 switch (qp->s_ack_state) {
94 case OP(RDMA_READ_RESPONSE_LAST):
95 case OP(RDMA_READ_RESPONSE_ONLY):
96 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
98 atomic_dec(&e->rdma_sge.mr->refcount);
99 e->rdma_sge.mr = NULL;
102 case OP(ATOMIC_ACKNOWLEDGE):
104 * We can increment the tail pointer now that the last
105 * response has been sent instead of only being
108 if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC)
109 qp->s_tail_ack_queue = 0;
112 case OP(ACKNOWLEDGE):
113 /* Check for no next entry in the queue. */
114 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
115 if (qp->s_flags & QIB_S_ACK_PENDING)
120 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
121 if (e->opcode == OP(RDMA_READ_REQUEST)) {
123 * If an RDMA read response is being resent and
124 * we haven't seen the duplicate request yet,
125 * then stop sending the remaining responses the
126 * responder has seen until the requester resends it.
128 len = e->rdma_sge.sge_length;
129 if (len && !e->rdma_sge.mr) {
130 qp->s_tail_ack_queue = qp->r_head_ack_queue;
133 /* Copy SGE state in case we need to resend */
134 qp->s_rdma_mr = e->rdma_sge.mr;
136 atomic_inc(&qp->s_rdma_mr->refcount);
137 qp->s_ack_rdma_sge.sge = e->rdma_sge;
138 qp->s_ack_rdma_sge.num_sge = 1;
139 qp->s_cur_sge = &qp->s_ack_rdma_sge;
142 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
144 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
147 ohdr->u.aeth = qib_compute_aeth(qp);
149 qp->s_ack_rdma_psn = e->psn;
150 bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
152 /* COMPARE_SWAP or FETCH_ADD */
153 qp->s_cur_sge = NULL;
155 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
156 ohdr->u.at.aeth = qib_compute_aeth(qp);
157 ohdr->u.at.atomic_ack_eth[0] =
158 cpu_to_be32(e->atomic_data >> 32);
159 ohdr->u.at.atomic_ack_eth[1] =
160 cpu_to_be32(e->atomic_data);
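		/*
		 * The 64-bit atomic result is returned in the ATOMIC ACK ETH as
		 * two big-endian 32-bit words: the high half in
		 * atomic_ack_eth[0], the low half in atomic_ack_eth[1]. The
		 * requester reassembles it the same way in qib_rc_rcv_resp().
		 */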
161 hwords += sizeof(ohdr->u.at) / sizeof(u32);
162 bth2 = e->psn & QIB_PSN_MASK;
165 bth0 = qp->s_ack_state << 24;
168 case OP(RDMA_READ_RESPONSE_FIRST):
169 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
171 case OP(RDMA_READ_RESPONSE_MIDDLE):
172 qp->s_cur_sge = &qp->s_ack_rdma_sge;
173 qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr;
175 atomic_inc(&qp->s_rdma_mr->refcount);
176 len = qp->s_ack_rdma_sge.sge.sge_length;
180 ohdr->u.aeth = qib_compute_aeth(qp);
182 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
183 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
186 bth0 = qp->s_ack_state << 24;
187 bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK;
193 * Send a regular ACK.
194 * Set s_ack_state to a placeholder (SEND_ONLY) so we wait until
195 * after sending the ACK before resetting s_ack_state to ACKNOWLEDGE
198 qp->s_ack_state = OP(SEND_ONLY);
199 qp->s_flags &= ~QIB_S_ACK_PENDING;
200 qp->s_cur_sge = NULL;
203 cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
205 QIB_AETH_CREDIT_SHIFT));
207 ohdr->u.aeth = qib_compute_aeth(qp);
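	/*
	 * AETH layout (per the IB spec): the low 24 bits carry the MSN and the
	 * top 8 bits carry the syndrome - a NAK code here, or a credit count
	 * when qib_compute_aeth() builds a normal ACK - hence the shift by
	 * QIB_AETH_CREDIT_SHIFT (presumably 24).
	 */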
210 bth0 = OP(ACKNOWLEDGE) << 24;
211 bth2 = qp->s_ack_psn & QIB_PSN_MASK;
213 qp->s_rdma_ack_cnt++;
214 qp->s_hdrwords = hwords;
215 qp->s_cur_size = len;
216 qib_make_ruc_header(qp, ohdr, bth0, bth2);
220 qp->s_ack_state = OP(ACKNOWLEDGE);
221 qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING);
226 * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
227 * @qp: a pointer to the QP
229 * Return 1 if constructed; otherwise, return 0.
231 int qib_make_rc_req(struct qib_qp *qp)
233 struct qib_ibdev *dev = to_idev(qp->ibqp.device);
234 struct qib_other_headers *ohdr;
235 struct qib_sge_state *ss;
236 struct qib_swqe *wqe;
247 ohdr = &qp->s_hdr.u.oth;
248 if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
249 ohdr = &qp->s_hdr.u.l.oth;
252 * The lock is needed to synchronize between the sending tasklet,
253 * the receive interrupt handler, and timeout resends.
255 spin_lock_irqsave(&qp->s_lock, flags);
257 /* Sending responses takes priority over sending requests. */
258 if ((qp->s_flags & QIB_S_RESP_PENDING) &&
259 qib_make_rc_ack(dev, qp, ohdr, pmtu))
262 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) {
263 if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND))
265 /* We are in the error state, flush the work request. */
266 if (qp->s_last == qp->s_head)
268 /* If DMAs are in progress, we can't flush immediately. */
269 if (atomic_read(&qp->s_dma_busy)) {
270 qp->s_flags |= QIB_S_WAIT_DMA;
273 wqe = get_swqe_ptr(qp, qp->s_last);
274 qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
275 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
276 /* will get called again */
280 if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK))
283 if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) {
284 if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
285 qp->s_flags |= QIB_S_WAIT_PSN;
288 qp->s_sending_psn = qp->s_psn;
289 qp->s_sending_hpsn = qp->s_psn - 1;
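	/*
	 * qib_cmp24() (defined elsewhere) is assumed to compare two 24-bit
	 * PSNs circularly: its sign reflects the signed 24-bit difference, so
	 * e.g. 0x000001 compares as "after" 0xfffffe even though it is
	 * numerically smaller.
	 */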
292 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
296 /* Send a request. */
297 wqe = get_swqe_ptr(qp, qp->s_cur);
298 switch (qp->s_state) {
300 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK))
303 * Resend an old request or start a new one.
305 * We keep track of the current SWQE so that
306 * we don't reset the "furthest progress" state
307 * if we need to back up.
310 if (qp->s_cur == qp->s_tail) {
311 /* Check if send work queue is empty. */
312 if (qp->s_tail == qp->s_head)
315 * If a fence is requested, wait for previous
316 * RDMA read and atomic operations to finish.
318 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
319 qp->s_num_rd_atomic) {
320 qp->s_flags |= QIB_S_WAIT_FENCE;
323 wqe->psn = qp->s_next_psn;
327 * Note that we have to be careful not to modify the
328 * original work request since we may need to resend
333 bth2 = qp->s_psn & QIB_PSN_MASK;
334 switch (wqe->wr.opcode) {
336 case IB_WR_SEND_WITH_IMM:
337 /* If no credit, return. */
338 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
339 qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
340 qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
343 wqe->lpsn = wqe->psn;
345 wqe->lpsn += (len - 1) / pmtu;
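			/*
			 * A multi-packet SEND spans ((len - 1) / pmtu) + 1
			 * packets, so the last PSN is psn + (len - 1) / pmtu;
			 * e.g. len == 10000 with a 4096-byte MTU takes three
			 * packets and lpsn = psn + 2.
			 */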
346 qp->s_state = OP(SEND_FIRST);
350 if (wqe->wr.opcode == IB_WR_SEND)
351 qp->s_state = OP(SEND_ONLY);
353 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
354 /* Immediate data comes after the BTH */
355 ohdr->u.imm_data = wqe->wr.ex.imm_data;
358 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
359 bth0 |= IB_BTH_SOLICITED;
360 bth2 |= IB_BTH_REQ_ACK;
361 if (++qp->s_cur == qp->s_size)
365 case IB_WR_RDMA_WRITE:
366 if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
368 goto no_flow_control;
369 case IB_WR_RDMA_WRITE_WITH_IMM:
370 /* If no credit, return. */
371 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) &&
372 qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
373 qp->s_flags |= QIB_S_WAIT_SSN_CREDIT;
377 ohdr->u.rc.reth.vaddr =
378 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
379 ohdr->u.rc.reth.rkey =
380 cpu_to_be32(wqe->wr.wr.rdma.rkey);
381 ohdr->u.rc.reth.length = cpu_to_be32(len);
382 hwords += sizeof(struct ib_reth) / sizeof(u32);
383 wqe->lpsn = wqe->psn;
385 wqe->lpsn += (len - 1) / pmtu;
386 qp->s_state = OP(RDMA_WRITE_FIRST);
390 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
391 qp->s_state = OP(RDMA_WRITE_ONLY);
394 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
395 /* Immediate data comes after RETH */
396 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
398 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
399 bth0 |= IB_BTH_SOLICITED;
401 bth2 |= IB_BTH_REQ_ACK;
402 if (++qp->s_cur == qp->s_size)
406 case IB_WR_RDMA_READ:
408 * Don't allow more operations to be started
409 * than the QP limits allow.
412 if (qp->s_num_rd_atomic >=
413 qp->s_max_rd_atomic) {
414 qp->s_flags |= QIB_S_WAIT_RDMAR;
417 qp->s_num_rd_atomic++;
418 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
421 * Adjust s_next_psn to count the
422 * expected number of responses.
425 qp->s_next_psn += (len - 1) / pmtu;
426 wqe->lpsn = qp->s_next_psn++;
428 ohdr->u.rc.reth.vaddr =
429 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
430 ohdr->u.rc.reth.rkey =
431 cpu_to_be32(wqe->wr.wr.rdma.rkey);
432 ohdr->u.rc.reth.length = cpu_to_be32(len);
433 qp->s_state = OP(RDMA_READ_REQUEST);
434 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
437 bth2 |= IB_BTH_REQ_ACK;
438 if (++qp->s_cur == qp->s_size)
442 case IB_WR_ATOMIC_CMP_AND_SWP:
443 case IB_WR_ATOMIC_FETCH_AND_ADD:
445 * Don't allow more operations to be started
446 * than the QP limits allow.
449 if (qp->s_num_rd_atomic >=
450 qp->s_max_rd_atomic) {
451 qp->s_flags |= QIB_S_WAIT_RDMAR;
454 qp->s_num_rd_atomic++;
455 if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT))
457 wqe->lpsn = wqe->psn;
459 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
460 qp->s_state = OP(COMPARE_SWAP);
461 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
462 wqe->wr.wr.atomic.swap);
463 ohdr->u.atomic_eth.compare_data = cpu_to_be64(
464 wqe->wr.wr.atomic.compare_add);
466 qp->s_state = OP(FETCH_ADD);
467 ohdr->u.atomic_eth.swap_data = cpu_to_be64(
468 wqe->wr.wr.atomic.compare_add);
469 ohdr->u.atomic_eth.compare_data = 0;
471 ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
472 wqe->wr.wr.atomic.remote_addr >> 32);
473 ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
474 wqe->wr.wr.atomic.remote_addr);
475 ohdr->u.atomic_eth.rkey = cpu_to_be32(
476 wqe->wr.wr.atomic.rkey);
477 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
480 bth2 |= IB_BTH_REQ_ACK;
481 if (++qp->s_cur == qp->s_size)
488 qp->s_sge.sge = wqe->sg_list[0];
489 qp->s_sge.sg_list = wqe->sg_list + 1;
490 qp->s_sge.num_sge = wqe->wr.num_sge;
491 qp->s_sge.total_len = wqe->length;
492 qp->s_len = wqe->length;
495 if (qp->s_tail >= qp->s_size)
498 if (wqe->wr.opcode == IB_WR_RDMA_READ)
499 qp->s_psn = wqe->lpsn + 1;
502 if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
503 qp->s_next_psn = qp->s_psn;
507 case OP(RDMA_READ_RESPONSE_FIRST):
509 * qp->s_state is normally set to the opcode of the
510 * last packet constructed for new requests and therefore
511 * is never set to RDMA read response.
512 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
513 * thread to indicate a SEND needs to be restarted from an
514 * earlier PSN without interfering with the sending thread.
515 * See qib_restart_rc().
517 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
520 qp->s_state = OP(SEND_MIDDLE);
522 case OP(SEND_MIDDLE):
523 bth2 = qp->s_psn++ & QIB_PSN_MASK;
524 if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
525 qp->s_next_psn = qp->s_psn;
532 if (wqe->wr.opcode == IB_WR_SEND)
533 qp->s_state = OP(SEND_LAST);
535 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
536 /* Immediate data comes after the BTH */
537 ohdr->u.imm_data = wqe->wr.ex.imm_data;
540 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
541 bth0 |= IB_BTH_SOLICITED;
542 bth2 |= IB_BTH_REQ_ACK;
544 if (qp->s_cur >= qp->s_size)
548 case OP(RDMA_READ_RESPONSE_LAST):
550 * qp->s_state is normally set to the opcode of the
551 * last packet constructed for new requests and therefore
552 * is never set to RDMA read response.
553 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
555 * thread to indicate an RDMA write needs to be restarted from
556 * an earlier PSN without interfering with the sending thread.
556 * See qib_restart_rc().
558 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
560 case OP(RDMA_WRITE_FIRST):
561 qp->s_state = OP(RDMA_WRITE_MIDDLE);
563 case OP(RDMA_WRITE_MIDDLE):
564 bth2 = qp->s_psn++ & QIB_PSN_MASK;
565 if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0)
566 qp->s_next_psn = qp->s_psn;
573 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
574 qp->s_state = OP(RDMA_WRITE_LAST);
576 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
577 /* Immediate data comes after the BTH */
578 ohdr->u.imm_data = wqe->wr.ex.imm_data;
580 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
581 bth0 |= IB_BTH_SOLICITED;
583 bth2 |= IB_BTH_REQ_ACK;
585 if (qp->s_cur >= qp->s_size)
589 case OP(RDMA_READ_RESPONSE_MIDDLE):
591 * qp->s_state is normally set to the opcode of the
592 * last packet constructed for new requests and therefore
593 * is never set to RDMA read response.
594 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
595 * thread to indicate an RDMA read needs to be restarted from
596 * an earlier PSN without interfering with the sending thread.
597 * See qib_restart_rc().
599 len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu;
600 ohdr->u.rc.reth.vaddr =
601 cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
602 ohdr->u.rc.reth.rkey =
603 cpu_to_be32(wqe->wr.wr.rdma.rkey);
604 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
605 qp->s_state = OP(RDMA_READ_REQUEST);
606 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
607 bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK;
608 qp->s_psn = wqe->lpsn + 1;
612 if (qp->s_cur == qp->s_size)
616 qp->s_sending_hpsn = bth2;
617 delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8;
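	/*
	 * Shifting left then arithmetic-right by 8 sign-extends the 24-bit PSN
	 * difference into a signed int, so delta counts how many packets of
	 * this request have been generated even across a PSN wrap; an ACK is
	 * then requested every QIB_PSN_CREDIT packets.
	 */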
618 if (delta && delta % QIB_PSN_CREDIT == 0)
619 bth2 |= IB_BTH_REQ_ACK;
620 if (qp->s_flags & QIB_S_SEND_ONE) {
621 qp->s_flags &= ~QIB_S_SEND_ONE;
622 qp->s_flags |= QIB_S_WAIT_ACK;
623 bth2 |= IB_BTH_REQ_ACK;
626 qp->s_hdrwords = hwords;
628 qp->s_cur_size = len;
629 qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2);
635 qp->s_flags &= ~QIB_S_BUSY;
637 spin_unlock_irqrestore(&qp->s_lock, flags);
642 * qib_send_rc_ack - Construct an ACK packet and send it
643 * @qp: a pointer to the QP
645 * This is called from qib_rc_rcv() and qib_kreceive().
646 * Note that RDMA reads and atomics are handled in the
647 * send side QP state and tasklet.
649 void qib_send_rc_ack(struct qib_qp *qp)
651 struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
652 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
653 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
660 struct qib_ib_header hdr;
661 struct qib_other_headers *ohdr;
665 spin_lock_irqsave(&qp->s_lock, flags);
667 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
670 /* Don't send an ACK or NAK if an RDMA read or atomic is pending. */
671 if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt)
674 /* Construct the header with s_lock held so APM doesn't change it. */
677 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
679 if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
680 hwords += qib_make_grh(ibp, &hdr.u.l.grh,
681 &qp->remote_ah_attr.grh, hwords, 0);
685 /* read pkey_index w/o lock (it's atomic) */
686 bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24);
687 if (qp->s_mig_state == IB_MIG_MIGRATED)
688 bth0 |= IB_BTH_MIG_REQ;
690 ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) |
692 QIB_AETH_CREDIT_SHIFT));
694 ohdr->u.aeth = qib_compute_aeth(qp);
695 lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 |
696 qp->remote_ah_attr.sl << 4;
697 hdr.lrh[0] = cpu_to_be16(lrh0);
698 hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
699 hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
700 hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits);
701 ohdr->bth[0] = cpu_to_be32(bth0);
702 ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
703 ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK);
705 spin_unlock_irqrestore(&qp->s_lock, flags);
707 /* Don't try to send ACKs if the link isn't ACTIVE */
708 if (!(ppd->lflags & QIBL_LINKACTIVE))
711 control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC,
712 qp->s_srate, lrh0 >> 12);
713 /* length is + 1 for the control dword */
714 pbc = ((u64) control << 32) | (hwords + 1);
716 piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
719 * We are out of PIO buffers at the moment.
720 * Pass responsibility for sending the ACK to the
721 * send tasklet so that when a PIO buffer becomes
722 * available, the ACK is sent ahead of other outgoing
725 spin_lock_irqsave(&qp->s_lock, flags);
731 * We have to flush after the PBC for correctness
732 * on some CPUs, or the WC buffer can be written out of order.
736 if (dd->flags & QIB_PIO_FLUSH_WC) {
737 u32 *hdrp = (u32 *) &hdr;
740 qib_pio_copy(piobuf + 2, hdrp, hwords - 1);
742 __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
744 qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords);
746 if (dd->flags & QIB_USE_SPCL_TRIG) {
747 u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
750 __raw_writel(0xaebecede, piobuf + spcl_off);
754 qib_sendbuf_done(dd, pbufn);
756 ibp->n_unicast_xmit++;
760 if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
762 qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING;
763 qp->s_nak_state = qp->r_nak_state;
764 qp->s_ack_psn = qp->r_ack_psn;
766 /* Schedule the send tasklet. */
767 qib_schedule_send(qp);
770 spin_unlock_irqrestore(&qp->s_lock, flags);
776 * reset_psn - reset the QP state to send starting from PSN
778 * @psn: the packet sequence number to restart at
780 * This is called from qib_rc_rcv() to process an incoming RC ACK
782 * Called at interrupt level with the QP s_lock held.
784 static void reset_psn(struct qib_qp *qp, u32 psn)
787 struct qib_swqe *wqe = get_swqe_ptr(qp, n);
793 * If we are starting the request from the beginning,
794 * let the normal send code handle initialization.
796 if (qib_cmp24(psn, wqe->psn) <= 0) {
797 qp->s_state = OP(SEND_LAST);
801 /* Find the work request opcode corresponding to the given PSN. */
802 opcode = wqe->wr.opcode;
806 if (++n == qp->s_size)
810 wqe = get_swqe_ptr(qp, n);
811 diff = qib_cmp24(psn, wqe->psn);
816 * If we are starting the request from the beginning,
817 * let the normal send code handle initialization.
820 qp->s_state = OP(SEND_LAST);
823 opcode = wqe->wr.opcode;
827 * Set the state to restart in the middle of a request.
828 * Don't change the s_sge, s_cur_sge, or s_cur_size.
829 * See qib_make_rc_req().
833 case IB_WR_SEND_WITH_IMM:
834 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
837 case IB_WR_RDMA_WRITE:
838 case IB_WR_RDMA_WRITE_WITH_IMM:
839 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
842 case IB_WR_RDMA_READ:
843 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
848 * This case shouldn't happen since it's only
851 qp->s_state = OP(SEND_LAST);
856 * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer
857 * asynchronously before the send tasklet can get scheduled.
858 * Doing it in qib_make_rc_req() is too late.
860 if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
861 (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
862 qp->s_flags |= QIB_S_WAIT_PSN;
866 * Back up requester to resend the last un-ACKed request.
867 * The QP r_lock and s_lock should be held and interrupts disabled.
869 static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
871 struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked);
872 struct qib_ibport *ibp;
874 if (qp->s_retry == 0) {
875 if (qp->s_mig_state == IB_MIG_ARMED) {
877 qp->s_retry = qp->s_retry_cnt;
878 } else if (qp->s_last == qp->s_acked) {
879 qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
880 qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
882 } else /* XXX need to handle delayed completion */
887 ibp = to_iport(qp->ibqp.device, qp->port_num);
888 if (wqe->wr.opcode == IB_WR_RDMA_READ)
891 ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
893 qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR |
894 QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN |
897 qp->s_flags |= QIB_S_SEND_ONE;
902 * This is called from s_timer for missing responses.
904 static void rc_timeout(unsigned long arg)
906 struct qib_qp *qp = (struct qib_qp *)arg;
907 struct qib_ibport *ibp;
910 spin_lock_irqsave(&qp->r_lock, flags);
911 spin_lock(&qp->s_lock);
912 if (qp->s_flags & QIB_S_TIMER) {
913 ibp = to_iport(qp->ibqp.device, qp->port_num);
914 ibp->n_rc_timeouts++;
915 qp->s_flags &= ~QIB_S_TIMER;
916 del_timer(&qp->s_timer);
917 qib_restart_rc(qp, qp->s_last_psn + 1, 1);
918 qib_schedule_send(qp);
920 spin_unlock(&qp->s_lock);
921 spin_unlock_irqrestore(&qp->r_lock, flags);
925 * This is called from s_timer for RNR timeouts.
927 void qib_rc_rnr_retry(unsigned long arg)
929 struct qib_qp *qp = (struct qib_qp *)arg;
932 spin_lock_irqsave(&qp->s_lock, flags);
933 if (qp->s_flags & QIB_S_WAIT_RNR) {
934 qp->s_flags &= ~QIB_S_WAIT_RNR;
935 del_timer(&qp->s_timer);
936 qib_schedule_send(qp);
938 spin_unlock_irqrestore(&qp->s_lock, flags);
942 * Set qp->s_sending_psn to the next PSN after the given one.
943 * This would be psn+1 except when RDMA reads are present.
945 static void reset_sending_psn(struct qib_qp *qp, u32 psn)
947 struct qib_swqe *wqe;
950 /* Find the work request corresponding to the given PSN. */
952 wqe = get_swqe_ptr(qp, n);
953 if (qib_cmp24(psn, wqe->lpsn) <= 0) {
954 if (wqe->wr.opcode == IB_WR_RDMA_READ)
955 qp->s_sending_psn = wqe->lpsn + 1;
957 qp->s_sending_psn = psn + 1;
960 if (++n == qp->s_size)
968 * This should be called with the QP s_lock held and interrupts disabled.
970 void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
972 struct qib_other_headers *ohdr;
973 struct qib_swqe *wqe;
979 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND))
982 /* Find out where the BTH is */
983 if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH)
986 ohdr = &hdr->u.l.oth;
988 opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
989 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
990 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
991 WARN_ON(!qp->s_rdma_ack_cnt);
992 qp->s_rdma_ack_cnt--;
996 psn = be32_to_cpu(ohdr->bth[2]);
997 reset_sending_psn(qp, psn);
1000 * Start timer after a packet requesting an ACK has been sent and
1001 * there are still requests that haven't been acked.
1003 if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
1004 !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
1005 (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
1008 while (qp->s_last != qp->s_acked) {
1009 wqe = get_swqe_ptr(qp, qp->s_last);
1010 if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 &&
1011 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
1013 for (i = 0; i < wqe->wr.num_sge; i++) {
1014 struct qib_sge *sge = &wqe->sg_list[i];
1016 atomic_dec(&sge->mr->refcount);
1018 /* Post a send completion queue entry if requested. */
1019 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
1020 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1021 memset(&wc, 0, sizeof wc);
1022 wc.wr_id = wqe->wr.wr_id;
1023 wc.status = IB_WC_SUCCESS;
1024 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
1025 wc.byte_len = wqe->length;
1027 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
1029 if (++qp->s_last >= qp->s_size)
1033 * If we were waiting for sends to complete before resending,
1034 * and they are now complete, restart sending.
1036 if (qp->s_flags & QIB_S_WAIT_PSN &&
1037 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1038 qp->s_flags &= ~QIB_S_WAIT_PSN;
1039 qp->s_sending_psn = qp->s_psn;
1040 qp->s_sending_hpsn = qp->s_psn - 1;
1041 qib_schedule_send(qp);
1045 static inline void update_last_psn(struct qib_qp *qp, u32 psn)
1047 qp->s_last_psn = psn;
1051 * Generate a SWQE completion.
1052 * This is similar to qib_send_complete but has to check to be sure
1053 * that the SGEs are not being referenced if the SWQE is being resent.
1055 static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
1056 struct qib_swqe *wqe,
1057 struct qib_ibport *ibp)
1063 * Don't decrement refcount and don't generate a
1064 * completion if the SWQE is being resent until the send
1067 if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 ||
1068 qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1069 for (i = 0; i < wqe->wr.num_sge; i++) {
1070 struct qib_sge *sge = &wqe->sg_list[i];
1072 atomic_dec(&sge->mr->refcount);
1074 /* Post a send completion queue entry if requested. */
1075 if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
1076 (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
1077 memset(&wc, 0, sizeof wc);
1078 wc.wr_id = wqe->wr.wr_id;
1079 wc.status = IB_WC_SUCCESS;
1080 wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
1081 wc.byte_len = wqe->length;
1083 qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
1085 if (++qp->s_last >= qp->s_size)
1088 ibp->n_rc_delayed_comp++;
1090 qp->s_retry = qp->s_retry_cnt;
1091 update_last_psn(qp, wqe->lpsn);
1094 * If we are completing a request which is in the process of
1095 * being resent, we can stop resending it since we know the
1096 * responder has already seen it.
1098 if (qp->s_acked == qp->s_cur) {
1099 if (++qp->s_cur >= qp->s_size)
1101 qp->s_acked = qp->s_cur;
1102 wqe = get_swqe_ptr(qp, qp->s_cur);
1103 if (qp->s_acked != qp->s_tail) {
1104 qp->s_state = OP(SEND_LAST);
1105 qp->s_psn = wqe->psn;
1108 if (++qp->s_acked >= qp->s_size)
1110 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
1112 wqe = get_swqe_ptr(qp, qp->s_acked);
1118 * do_rc_ack - process an incoming RC ACK
1119 * @qp: the QP the ACK came in on
1120 * @psn: the packet sequence number of the ACK
1121 * @opcode: the opcode of the request that resulted in the ACK
1123 * This is called from qib_rc_rcv_resp() to process an incoming RC ACK
1125 * Called at interrupt level with the QP s_lock held.
1126 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
1128 static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode,
1129 u64 val, struct qib_ctxtdata *rcd)
1131 struct qib_ibport *ibp;
1132 enum ib_wc_status status;
1133 struct qib_swqe *wqe;
1138 /* Remove QP from retry timer */
1139 if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
1140 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
1141 del_timer(&qp->s_timer);
1145 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
1146 * requests and implicitly NAK RDMA read and atomic requests issued
1147 * before the NAK'ed request. The MSN won't include the NAK'ed
1148 * request but will include any ACK'ed requests.
1153 wqe = get_swqe_ptr(qp, qp->s_acked);
1154 ibp = to_iport(qp->ibqp.device, qp->port_num);
1157 * The MSN might be for a later WQE than the PSN indicates so
1158 * only complete WQEs that the PSN finishes.
1160 while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) {
1162 * RDMA_READ_RESPONSE_ONLY is a special case since
1163 * we want to generate completion events for everything
1164 * before the RDMA read, copy the data, then generate
1165 * the completion for the read.
1167 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
1168 opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
1174 * If this request is an RDMA read or atomic, and the ACK is
1175 * for a later operation, this ACK NAKs the RDMA read or
1176 * atomic. In other words, only an RDMA_READ_LAST or ONLY
1177 * can ACK an RDMA read, and likewise for atomic ops. Note
1178 * that the NAK case can only happen if relaxed ordering is
1179 * used and requests are sent after an RDMA read or atomic
1180 * is sent but before the response is received.
1182 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
1183 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
1184 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1185 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
1186 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
1187 /* Retry this request. */
1188 if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) {
1189 qp->r_flags |= QIB_R_RDMAR_SEQ;
1190 qib_restart_rc(qp, qp->s_last_psn + 1, 0);
1191 if (list_empty(&qp->rspwait)) {
1192 qp->r_flags |= QIB_R_RSP_SEND;
1193 atomic_inc(&qp->refcount);
1194 list_add_tail(&qp->rspwait,
1195 &rcd->qp_wait_list);
1199 * No need to process the ACK/NAK since we are
1200 * restarting an earlier request.
1204 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1205 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
1206 u64 *vaddr = wqe->sg_list[0].vaddr;
1209 if (qp->s_num_rd_atomic &&
1210 (wqe->wr.opcode == IB_WR_RDMA_READ ||
1211 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1212 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
1213 qp->s_num_rd_atomic--;
1214 /* Restart sending task if fence is complete */
1215 if ((qp->s_flags & QIB_S_WAIT_FENCE) &&
1216 !qp->s_num_rd_atomic) {
1217 qp->s_flags &= ~(QIB_S_WAIT_FENCE |
1219 qib_schedule_send(qp);
1220 } else if (qp->s_flags & QIB_S_WAIT_RDMAR) {
1221 qp->s_flags &= ~(QIB_S_WAIT_RDMAR |
1223 qib_schedule_send(qp);
1226 wqe = do_rc_completion(qp, wqe, ibp);
1227 if (qp->s_acked == qp->s_tail)
1231 switch (aeth >> 29) {
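	/*
	 * The top three bits of the AETH select the response type: 0 is a
	 * normal ACK (with a credit count in the remaining syndrome bits),
	 * 1 is an RNR NAK, 3 is a NAK carrying an error code, and 2 is
	 * reserved, matching the case labels of this switch.
	 */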
1234 if (qp->s_acked != qp->s_tail) {
1236 * We are expecting more ACKs so
1237 * reset the retransmit timer.
1241 * We can stop resending the earlier packets and
1242 * continue with the next packet the receiver wants.
1244 if (qib_cmp24(qp->s_psn, psn) <= 0)
1245 reset_psn(qp, psn + 1);
1246 } else if (qib_cmp24(qp->s_psn, psn) <= 0) {
1247 qp->s_state = OP(SEND_LAST);
1248 qp->s_psn = psn + 1;
1250 if (qp->s_flags & QIB_S_WAIT_ACK) {
1251 qp->s_flags &= ~QIB_S_WAIT_ACK;
1252 qib_schedule_send(qp);
1254 qib_get_credit(qp, aeth);
1255 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1256 qp->s_retry = qp->s_retry_cnt;
1257 update_last_psn(qp, psn);
1261 case 1: /* RNR NAK */
1263 if (qp->s_acked == qp->s_tail)
1265 if (qp->s_flags & QIB_S_WAIT_RNR)
1267 if (qp->s_rnr_retry == 0) {
1268 status = IB_WC_RNR_RETRY_EXC_ERR;
1271 if (qp->s_rnr_retry_cnt < 7)
1274 /* The last valid PSN is the previous PSN. */
1275 update_last_psn(qp, psn - 1);
1277 ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK;
1281 qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK);
1282 qp->s_flags |= QIB_S_WAIT_RNR;
1283 qp->s_timer.function = qib_rc_rnr_retry;
1284 qp->s_timer.expires = jiffies + usecs_to_jiffies(
1285 ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) &
1286 QIB_AETH_CREDIT_MASK]);
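		/*
		 * The 5-bit RNR timer field from the AETH syndrome indexes
		 * ib_qib_rnr_table[] (defined elsewhere), which presumably
		 * holds the IB-defined RNR delays in microseconds; the retry
		 * is rescheduled that far in the future.
		 */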
1287 add_timer(&qp->s_timer);
1291 if (qp->s_acked == qp->s_tail)
1293 /* The last valid PSN is the previous PSN. */
1294 update_last_psn(qp, psn - 1);
1295 switch ((aeth >> QIB_AETH_CREDIT_SHIFT) &
1296 QIB_AETH_CREDIT_MASK) {
1297 case 0: /* PSN sequence error */
1300 * Back up to the responder's expected PSN.
1301 * Note that we might get a NAK in the middle of an
1302 * RDMA READ response which terminates the RDMA
1305 qib_restart_rc(qp, psn, 0);
1306 qib_schedule_send(qp);
1309 case 1: /* Invalid Request */
1310 status = IB_WC_REM_INV_REQ_ERR;
1311 ibp->n_other_naks++;
1314 case 2: /* Remote Access Error */
1315 status = IB_WC_REM_ACCESS_ERR;
1316 ibp->n_other_naks++;
1319 case 3: /* Remote Operation Error */
1320 status = IB_WC_REM_OP_ERR;
1321 ibp->n_other_naks++;
1323 if (qp->s_last == qp->s_acked) {
1324 qib_send_complete(qp, wqe, status);
1325 qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1330 /* Ignore other reserved NAK error codes */
1333 qp->s_retry = qp->s_retry_cnt;
1334 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1337 default: /* 2: reserved */
1339 /* Ignore reserved NAK codes. */
1348 * We have seen an out-of-sequence RDMA read middle or last packet.
1349 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
1351 static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn,
1352 struct qib_ctxtdata *rcd)
1354 struct qib_swqe *wqe;
1356 /* Remove QP from retry timer */
1357 if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
1358 qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
1359 del_timer(&qp->s_timer);
1362 wqe = get_swqe_ptr(qp, qp->s_acked);
1364 while (qib_cmp24(psn, wqe->lpsn) > 0) {
1365 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1366 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1367 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
1369 wqe = do_rc_completion(qp, wqe, ibp);
1373 qp->r_flags |= QIB_R_RDMAR_SEQ;
1374 qib_restart_rc(qp, qp->s_last_psn + 1, 0);
1375 if (list_empty(&qp->rspwait)) {
1376 qp->r_flags |= QIB_R_RSP_SEND;
1377 atomic_inc(&qp->refcount);
1378 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1383 * qib_rc_rcv_resp - process an incoming RC response packet
1384 * @ibp: the port this packet came in on
1385 * @ohdr: the other headers for this packet
1386 * @data: the packet data
1387 * @tlen: the packet length
1388 * @qp: the QP for this packet
1389 * @opcode: the opcode for this packet
1390 * @psn: the packet sequence number for this packet
1391 * @hdrsize: the header length
1392 * @pmtu: the path MTU
1394 * This is called from qib_rc_rcv() to process an incoming RC response
1395 * packet for the given QP.
1396 * Called at interrupt level.
1398 static void qib_rc_rcv_resp(struct qib_ibport *ibp,
1399 struct qib_other_headers *ohdr,
1400 void *data, u32 tlen,
1403 u32 psn, u32 hdrsize, u32 pmtu,
1404 struct qib_ctxtdata *rcd)
1406 struct qib_swqe *wqe;
1407 struct qib_pportdata *ppd = ppd_from_ibp(ibp);
1408 enum ib_wc_status status;
1409 unsigned long flags;
1415 if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) {
1417 * If the ACK'd PSN is on the SDMA busy list, try to make progress
1418 * to reclaim SDMA credits.
1420 if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) &&
1421 (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) {
1424 * If the send tasklet is not running, attempt to progress
1427 if (!(qp->s_flags & QIB_S_BUSY)) {
1428 /* Acquire SDMA Lock */
1429 spin_lock_irqsave(&ppd->sdma_lock, flags);
1430 /* Invoke sdma make progress */
1431 qib_sdma_make_progress(ppd);
1432 /* Release SDMA Lock */
1433 spin_unlock_irqrestore(&ppd->sdma_lock, flags);
1438 spin_lock_irqsave(&qp->s_lock, flags);
1439 if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
1442 /* Ignore invalid responses. */
1443 if (qib_cmp24(psn, qp->s_next_psn) >= 0)
1446 /* Ignore duplicate responses. */
1447 diff = qib_cmp24(psn, qp->s_last_psn);
1448 if (unlikely(diff <= 0)) {
1449 /* Update credits for "ghost" ACKs */
1450 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1451 aeth = be32_to_cpu(ohdr->u.aeth);
1452 if ((aeth >> 29) == 0)
1453 qib_get_credit(qp, aeth);
1459 * Skip everything other than the PSN we expect, if we are waiting
1460 * for a reply to a restarted RDMA read or atomic op.
1462 if (qp->r_flags & QIB_R_RDMAR_SEQ) {
1463 if (qib_cmp24(psn, qp->s_last_psn + 1) != 0)
1465 qp->r_flags &= ~QIB_R_RDMAR_SEQ;
1468 if (unlikely(qp->s_acked == qp->s_tail))
1470 wqe = get_swqe_ptr(qp, qp->s_acked);
1471 status = IB_WC_SUCCESS;
1474 case OP(ACKNOWLEDGE):
1475 case OP(ATOMIC_ACKNOWLEDGE):
1476 case OP(RDMA_READ_RESPONSE_FIRST):
1477 aeth = be32_to_cpu(ohdr->u.aeth);
1478 if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
1479 __be32 *p = ohdr->u.at.atomic_ack_eth;
1481 val = ((u64) be32_to_cpu(p[0]) << 32) |
1485 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
1486 opcode != OP(RDMA_READ_RESPONSE_FIRST))
1489 wqe = get_swqe_ptr(qp, qp->s_acked);
1490 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1493 * If this is a response to a resent RDMA read, we
1494 * have to be careful to copy the data to the right
1497 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1501 case OP(RDMA_READ_RESPONSE_MIDDLE):
1502 /* no AETH, no ACK */
1503 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1505 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1508 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1510 if (unlikely(pmtu >= qp->s_rdma_read_len))
1514 * We got a response so update the timeout.
1515 * 4.096 usec. * (1 << qp->timeout)
1517 qp->s_flags |= QIB_S_TIMER;
1518 mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies);
1519 if (qp->s_flags & QIB_S_WAIT_ACK) {
1520 qp->s_flags &= ~QIB_S_WAIT_ACK;
1521 qib_schedule_send(qp);
1524 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
1525 qp->s_retry = qp->s_retry_cnt;
1528 * Update the RDMA receive state but do the copy without
1529 * holding the locks or blocking interrupts.
1531 qp->s_rdma_read_len -= pmtu;
1532 update_last_psn(qp, psn);
1533 spin_unlock_irqrestore(&qp->s_lock, flags);
1534 qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
1537 case OP(RDMA_READ_RESPONSE_ONLY):
1538 aeth = be32_to_cpu(ohdr->u.aeth);
1539 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
1541 /* Get the number of bytes the message was padded by. */
1542 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
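		/*
		 * PadCnt is the 2-bit field at bits 21:20 of the first BTH
		 * word; it gives the number of pad bytes (0-3) appended so the
		 * payload ends on a 4-byte boundary and is subtracted out of
		 * the length checks below.
		 */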
1544 * Check that the data size is >= 0 && <= pmtu.
1545 * Remember to account for the AETH header (4) and
1548 if (unlikely(tlen < (hdrsize + pad + 8)))
1551 * If this is a response to a resent RDMA read, we
1552 * have to be careful to copy the data to the right
1555 wqe = get_swqe_ptr(qp, qp->s_acked);
1556 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1560 case OP(RDMA_READ_RESPONSE_LAST):
1561 /* ACKs READ req. */
1562 if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1)))
1564 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1566 /* Get the number of bytes the message was padded by. */
1567 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1569 * Check that the data size is >= 1 && <= pmtu.
1570 * Remember to account for the AETH header (4) and
1573 if (unlikely(tlen <= (hdrsize + pad + 8)))
1576 tlen -= hdrsize + pad + 8;
1577 if (unlikely(tlen != qp->s_rdma_read_len))
1579 aeth = be32_to_cpu(ohdr->u.aeth);
1580 qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
1581 WARN_ON(qp->s_rdma_read_sge.num_sge);
1582 (void) do_rc_ack(qp, aeth, psn,
1583 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
1588 status = IB_WC_LOC_QP_OP_ERR;
1592 rdma_seq_err(qp, ibp, psn, rcd);
1596 status = IB_WC_LOC_LEN_ERR;
1598 if (qp->s_last == qp->s_acked) {
1599 qib_send_complete(qp, wqe, status);
1600 qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1603 spin_unlock_irqrestore(&qp->s_lock, flags);
1609 * qib_rc_rcv_error - process an incoming duplicate or error RC packet
1610 * @ohdr: the other headers for this packet
1611 * @data: the packet data
1612 * @qp: the QP for this packet
1613 * @opcode: the opcode for this packet
1614 * @psn: the packet sequence number for this packet
1615 * @diff: the difference between the PSN and the expected PSN
1617 * This is called from qib_rc_rcv() to process an unexpected
1618 * incoming RC packet for the given QP.
1619 * Called at interrupt level.
1620 * Return 1 if no more processing is needed; otherwise return 0 to
1621 * schedule a response to be sent.
1623 static int qib_rc_rcv_error(struct qib_other_headers *ohdr,
1629 struct qib_ctxtdata *rcd)
1631 struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
1632 struct qib_ack_entry *e;
1633 unsigned long flags;
1639 * Packet sequence error.
1640 * A NAK will ACK earlier sends and RDMA writes.
1641 * Don't queue the NAK if we already sent one.
1643 if (!qp->r_nak_state) {
1645 qp->r_nak_state = IB_NAK_PSN_ERROR;
1646 /* Use the expected PSN. */
1647 qp->r_ack_psn = qp->r_psn;
1649 * Wait to send the sequence NAK until all packets
1650 * in the receive queue have been processed.
1651 * Otherwise, we end up propagating congestion.
1653 if (list_empty(&qp->rspwait)) {
1654 qp->r_flags |= QIB_R_RSP_NAK;
1655 atomic_inc(&qp->refcount);
1656 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1663 * Handle a duplicate request. Don't re-execute SEND, RDMA
1664 * write or atomic op. Don't NAK errors, just silently drop
1665 * the duplicate request. Note that r_sge, r_len, and
1666 * r_rcv_len may be in use so don't modify them.
1668 * We are supposed to ACK the earliest duplicate PSN but we
1669 * can coalesce an outstanding duplicate ACK. We have to
1670 * send the earliest so that RDMA reads can be restarted at
1671 * the requester's expected PSN.
1673 * First, find where this duplicate PSN falls within the
1674 * ACKs previously sent.
1675 * old_req is true if there is an older response that is scheduled
1676 * to be sent before sending this one.
1682 spin_lock_irqsave(&qp->s_lock, flags);
1684 for (i = qp->r_head_ack_queue; ; i = prev) {
1685 if (i == qp->s_tail_ack_queue)
1690 prev = QIB_MAX_RDMA_ATOMIC;
1691 if (prev == qp->r_head_ack_queue) {
1695 e = &qp->s_ack_queue[prev];
1700 if (qib_cmp24(psn, e->psn) >= 0) {
1701 if (prev == qp->s_tail_ack_queue &&
1702 qib_cmp24(psn, e->lpsn) <= 0)
1708 case OP(RDMA_READ_REQUEST): {
1709 struct ib_reth *reth;
1714 * If we didn't find the RDMA read request in the ack queue,
1715 * we can ignore this request.
1717 if (!e || e->opcode != OP(RDMA_READ_REQUEST))
1719 /* RETH comes after BTH */
1720 reth = &ohdr->u.rc.reth;
1722 * Address range must be a subset of the original
1723 * request and start on pmtu boundaries.
1724 * We reuse the old ack_queue slot since the requester
1725 * should not back up and request an earlier PSN for the
1728 offset = ((psn - e->psn) & QIB_PSN_MASK) *
1730 len = be32_to_cpu(reth->length);
1731 if (unlikely(offset + len != e->rdma_sge.sge_length))
1733 if (e->rdma_sge.mr) {
1734 atomic_dec(&e->rdma_sge.mr->refcount);
1735 e->rdma_sge.mr = NULL;
1738 u32 rkey = be32_to_cpu(reth->rkey);
1739 u64 vaddr = be64_to_cpu(reth->vaddr);
1742 ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
1743 IB_ACCESS_REMOTE_READ);
1747 e->rdma_sge.vaddr = NULL;
1748 e->rdma_sge.length = 0;
1749 e->rdma_sge.sge_length = 0;
1754 qp->s_tail_ack_queue = prev;
1758 case OP(COMPARE_SWAP):
1759 case OP(FETCH_ADD): {
1761 * If we didn't find the atomic request in the ack queue
1762 * or the send tasklet is already backed up to send an
1763 * earlier entry, we can ignore this request.
1765 if (!e || e->opcode != (u8) opcode || old_req)
1767 qp->s_tail_ack_queue = prev;
1773 * Ignore this operation if it doesn't request an ACK
1774 * or an earlier RDMA read or atomic is going to be resent.
1776 if (!(psn & IB_BTH_REQ_ACK) || old_req)
1779 * Resend the most recent ACK if this request is
1780 * after all the previous RDMA reads and atomics.
1782 if (i == qp->r_head_ack_queue) {
1783 spin_unlock_irqrestore(&qp->s_lock, flags);
1784 qp->r_nak_state = 0;
1785 qp->r_ack_psn = qp->r_psn - 1;
1789 * Try to send a simple ACK to work around a Mellanox bug
1790 * which doesn't accept an RDMA read response or atomic
1791 * response as an ACK for earlier SENDs or RDMA writes.
1793 if (!(qp->s_flags & QIB_S_RESP_PENDING)) {
1794 spin_unlock_irqrestore(&qp->s_lock, flags);
1795 qp->r_nak_state = 0;
1796 qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
1800 * Resend the RDMA read or atomic op which
1801 * ACKs this duplicate request.
1803 qp->s_tail_ack_queue = i;
1806 qp->s_ack_state = OP(ACKNOWLEDGE);
1807 qp->s_flags |= QIB_S_RESP_PENDING;
1808 qp->r_nak_state = 0;
1809 qib_schedule_send(qp);
1812 spin_unlock_irqrestore(&qp->s_lock, flags);
1820 void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err)
1822 unsigned long flags;
1825 spin_lock_irqsave(&qp->s_lock, flags);
1826 lastwqe = qib_error_qp(qp, err);
1827 spin_unlock_irqrestore(&qp->s_lock, flags);
1832 ev.device = qp->ibqp.device;
1833 ev.element.qp = &qp->ibqp;
1834 ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
1835 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1839 static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n)
1844 if (next > QIB_MAX_RDMA_ATOMIC)
1846 qp->s_tail_ack_queue = next;
1847 qp->s_ack_state = OP(ACKNOWLEDGE);
1851 * qib_rc_rcv - process an incoming RC packet
1852 * @rcd: the context pointer
1853 * @hdr: the header of this packet
1854 * @has_grh: true if the header has a GRH
1855 * @data: the packet data
1856 * @tlen: the packet length
1857 * @qp: the QP for this packet
1859 * This is called from qib_qp_rcv() to process an incoming RC packet
1861 * Called at interrupt level.
1863 void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr,
1864 int has_grh, void *data, u32 tlen, struct qib_qp *qp)
1866 struct qib_ibport *ibp = &rcd->ppd->ibport_data;
1867 struct qib_other_headers *ohdr;
1873 u32 pmtu = qp->pmtu;
1875 struct ib_reth *reth;
1876 unsigned long flags;
1882 hdrsize = 8 + 12; /* LRH + BTH */
1884 ohdr = &hdr->u.l.oth;
1885 hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */
1888 opcode = be32_to_cpu(ohdr->bth[0]);
1889 if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode))
1892 psn = be32_to_cpu(ohdr->bth[2]);
1896 * Process responses (ACKs) before anything else. Note that the
1897 * packet sequence number will be for something in the send work
1898 * queue rather than the expected receive packet sequence number.
1899 * In other words, this QP is the requester.
1901 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1902 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1903 qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
1904 hdrsize, pmtu, rcd);
1908 /* Compute 24 bits worth of difference. */
1909 diff = qib_cmp24(psn, qp->r_psn);
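	/*
	 * A negative difference means a duplicate (already seen) PSN and a
	 * positive one means packets were missed; both cases are handed to
	 * qib_rc_rcv_error(), which either resends the appropriate ACK or
	 * queues a NAK.
	 */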
1910 if (unlikely(diff)) {
1911 if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
1916 /* Check for opcode sequence errors. */
1917 switch (qp->r_state) {
1918 case OP(SEND_FIRST):
1919 case OP(SEND_MIDDLE):
1920 if (opcode == OP(SEND_MIDDLE) ||
1921 opcode == OP(SEND_LAST) ||
1922 opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1926 case OP(RDMA_WRITE_FIRST):
1927 case OP(RDMA_WRITE_MIDDLE):
1928 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1929 opcode == OP(RDMA_WRITE_LAST) ||
1930 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1935 if (opcode == OP(SEND_MIDDLE) ||
1936 opcode == OP(SEND_LAST) ||
1937 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1938 opcode == OP(RDMA_WRITE_MIDDLE) ||
1939 opcode == OP(RDMA_WRITE_LAST) ||
1940 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1943 * Note that it is up to the requester to not send a new
1944 * RDMA read or atomic operation before receiving an ACK
1945 * for the previous operation.
1950 if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) {
1951 qp->r_flags |= QIB_R_COMM_EST;
1952 if (qp->ibqp.event_handler) {
1955 ev.device = qp->ibqp.device;
1956 ev.element.qp = &qp->ibqp;
1957 ev.event = IB_EVENT_COMM_EST;
1958 qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
1962 /* OK, process the packet. */
1964 case OP(SEND_FIRST):
1965 ret = qib_get_rwqe(qp, 0);
1972 case OP(SEND_MIDDLE):
1973 case OP(RDMA_WRITE_MIDDLE):
1975 /* Check for invalid length PMTU or posted rwqe len. */
1976 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1978 qp->r_rcv_len += pmtu;
1979 if (unlikely(qp->r_rcv_len > qp->r_len))
1981 qib_copy_sge(&qp->r_sge, data, pmtu, 1);
1984 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
1986 ret = qib_get_rwqe(qp, 1);
1994 case OP(SEND_ONLY_WITH_IMMEDIATE):
1995 ret = qib_get_rwqe(qp, 0);
2001 if (opcode == OP(SEND_ONLY))
2002 goto no_immediate_data;
2003 /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
2004 case OP(SEND_LAST_WITH_IMMEDIATE):
2006 wc.ex.imm_data = ohdr->u.imm_data;
2008 wc.wc_flags = IB_WC_WITH_IMM;
2011 case OP(RDMA_WRITE_LAST):
2016 /* Get the number of bytes the message was padded by. */
2017 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
2018 /* Check for invalid length. */
2019 /* XXX LAST len should be >= 1 */
2020 if (unlikely(tlen < (hdrsize + pad + 4)))
2022 /* Don't count the CRC. */
2023 tlen -= (hdrsize + pad + 4);
2024 wc.byte_len = tlen + qp->r_rcv_len;
2025 if (unlikely(wc.byte_len > qp->r_len))
2027 qib_copy_sge(&qp->r_sge, data, tlen, 1);
2028 while (qp->r_sge.num_sge) {
2029 atomic_dec(&qp->r_sge.sge.mr->refcount);
2030 if (--qp->r_sge.num_sge)
2031 qp->r_sge.sge = *qp->r_sge.sg_list++;
2034 if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags))
2036 wc.wr_id = qp->r_wr_id;
2037 wc.status = IB_WC_SUCCESS;
2038 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
2039 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
2040 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
2042 wc.opcode = IB_WC_RECV;
2044 wc.src_qp = qp->remote_qpn;
2045 wc.slid = qp->remote_ah_attr.dlid;
2046 wc.sl = qp->remote_ah_attr.sl;
2047 /* zero fields that are N/A */
2050 wc.dlid_path_bits = 0;
2053 /* Signal completion event if the solicited bit is set. */
2054 qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
2056 cpu_to_be32(IB_BTH_SOLICITED)) != 0);
2059 case OP(RDMA_WRITE_FIRST):
2060 case OP(RDMA_WRITE_ONLY):
2061 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
2062 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
2065 reth = &ohdr->u.rc.reth;
2066 hdrsize += sizeof(*reth);
2067 qp->r_len = be32_to_cpu(reth->length);
2069 qp->r_sge.sg_list = NULL;
2070 if (qp->r_len != 0) {
2071 u32 rkey = be32_to_cpu(reth->rkey);
2072 u64 vaddr = be64_to_cpu(reth->vaddr);
2075 /* Check rkey & NAK */
2076 ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
2077 rkey, IB_ACCESS_REMOTE_WRITE);
2080 qp->r_sge.num_sge = 1;
2082 qp->r_sge.num_sge = 0;
2083 qp->r_sge.sge.mr = NULL;
2084 qp->r_sge.sge.vaddr = NULL;
2085 qp->r_sge.sge.length = 0;
2086 qp->r_sge.sge.sge_length = 0;
2088 if (opcode == OP(RDMA_WRITE_FIRST))
2090 else if (opcode == OP(RDMA_WRITE_ONLY))
2091 goto no_immediate_data;
2092 ret = qib_get_rwqe(qp, 1);
2097 wc.ex.imm_data = ohdr->u.rc.imm_data;
2099 wc.wc_flags = IB_WC_WITH_IMM;
2102 case OP(RDMA_READ_REQUEST): {
2103 struct qib_ack_entry *e;
2107 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2109 next = qp->r_head_ack_queue + 1;
2110 /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */
2111 if (next > QIB_MAX_RDMA_ATOMIC)
2113 spin_lock_irqsave(&qp->s_lock, flags);
2114 if (unlikely(next == qp->s_tail_ack_queue)) {
2115 if (!qp->s_ack_queue[next].sent)
2116 goto nack_inv_unlck;
2117 qib_update_ack_queue(qp, next);
2119 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2120 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
2121 atomic_dec(&e->rdma_sge.mr->refcount);
2122 e->rdma_sge.mr = NULL;
2124 reth = &ohdr->u.rc.reth;
2125 len = be32_to_cpu(reth->length);
2127 u32 rkey = be32_to_cpu(reth->rkey);
2128 u64 vaddr = be64_to_cpu(reth->vaddr);
2131 /* Check rkey & NAK */
2132 ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr,
2133 rkey, IB_ACCESS_REMOTE_READ);
2135 goto nack_acc_unlck;
2137 * Update the next expected PSN. We add 1 later
2138 * below, so only add the remainder here.
2141 qp->r_psn += (len - 1) / pmtu;
2143 e->rdma_sge.mr = NULL;
2144 e->rdma_sge.vaddr = NULL;
2145 e->rdma_sge.length = 0;
2146 e->rdma_sge.sge_length = 0;
2151 e->lpsn = qp->r_psn;
2153 * We need to increment the MSN here instead of when we
2154 * finish sending the result since a duplicate request would
2155 * increment it more than once.
2159 qp->r_state = opcode;
2160 qp->r_nak_state = 0;
2161 qp->r_head_ack_queue = next;
2163 /* Schedule the send tasklet. */
2164 qp->s_flags |= QIB_S_RESP_PENDING;
2165 qib_schedule_send(qp);
2170 case OP(COMPARE_SWAP):
2171 case OP(FETCH_ADD): {
2172 struct ib_atomic_eth *ateth;
2173 struct qib_ack_entry *e;
2180 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
2182 next = qp->r_head_ack_queue + 1;
2183 if (next > QIB_MAX_RDMA_ATOMIC)
2185 spin_lock_irqsave(&qp->s_lock, flags);
2186 if (unlikely(next == qp->s_tail_ack_queue)) {
2187 if (!qp->s_ack_queue[next].sent)
2188 goto nack_inv_unlck;
2189 qib_update_ack_queue(qp, next);
2191 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2192 if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) {
2193 atomic_dec(&e->rdma_sge.mr->refcount);
2194 e->rdma_sge.mr = NULL;
2196 ateth = &ohdr->u.atomic_eth;
2197 vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
2198 be32_to_cpu(ateth->vaddr[1]);
2199 if (unlikely(vaddr & (sizeof(u64) - 1)))
2200 goto nack_inv_unlck;
2201 rkey = be32_to_cpu(ateth->rkey);
2202 /* Check rkey & NAK */
2203 if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
2205 IB_ACCESS_REMOTE_ATOMIC)))
2206 goto nack_acc_unlck;
2207 /* Perform atomic OP and save result. */
2208 maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
2209 sdata = be64_to_cpu(ateth->swap_data);
2210 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
2211 (u64) atomic64_add_return(sdata, maddr) - sdata :
2212 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
2213 be64_to_cpu(ateth->compare_data),
2215 atomic_dec(&qp->r_sge.sge.mr->refcount);
2216 qp->r_sge.num_sge = 0;
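		/*
		 * Either way e->atomic_data holds the value that was at the
		 * target address before the operation: atomic64_add_return()
		 * returns the post-add value so sdata is subtracted back out
		 * for FETCH_ADD, and cmpxchg() returns the prior contents for
		 * COMPARE_SWAP. That original value is what goes back in the
		 * ATOMIC ACK ETH.
		 */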
2223 qp->r_state = opcode;
2224 qp->r_nak_state = 0;
2225 qp->r_head_ack_queue = next;
2227 /* Schedule the send tasklet. */
2228 qp->s_flags |= QIB_S_RESP_PENDING;
2229 qib_schedule_send(qp);
2235 /* NAK unknown opcodes. */
2239 qp->r_state = opcode;
2240 qp->r_ack_psn = psn;
2241 qp->r_nak_state = 0;
2242 /* Send an ACK if requested or required. */
2243 if (psn & (1 << 31))
2248 qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
2249 qp->r_ack_psn = qp->r_psn;
2250 /* Queue RNR NAK for later */
2251 if (list_empty(&qp->rspwait)) {
2252 qp->r_flags |= QIB_R_RSP_NAK;
2253 atomic_inc(&qp->refcount);
2254 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2259 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2260 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2261 qp->r_ack_psn = qp->r_psn;
2262 /* Queue NAK for later */
2263 if (list_empty(&qp->rspwait)) {
2264 qp->r_flags |= QIB_R_RSP_NAK;
2265 atomic_inc(&qp->refcount);
2266 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2271 spin_unlock_irqrestore(&qp->s_lock, flags);
2273 qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2274 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
2275 qp->r_ack_psn = qp->r_psn;
2276 /* Queue NAK for later */
2277 if (list_empty(&qp->rspwait)) {
2278 qp->r_flags |= QIB_R_RSP_NAK;
2279 atomic_inc(&qp->refcount);
2280 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2285 spin_unlock_irqrestore(&qp->s_lock, flags);
2287 qib_rc_error(qp, IB_WC_LOC_PROT_ERR);
2288 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2289 qp->r_ack_psn = qp->r_psn;
2291 qib_send_rc_ack(qp);
2295 spin_unlock_irqrestore(&qp->s_lock, flags);