a4055ca0061427ed7b658b657af110e6cc2cc138
[pandora-kernel.git] / drivers / infiniband / hw / ipath / ipath_rc.c
1 /*
2  * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include "ipath_verbs.h"
34 #include "ips_common.h"
35
36 /* cut down ridiculously long IB macro names */
37 #define OP(x) IB_OPCODE_RC_##x
38
39 /**
40  * ipath_init_restart- initialize the qp->s_sge after a restart
41  * @qp: the QP who's SGE we're restarting
42  * @wqe: the work queue to initialize the QP's SGE from
43  *
44  * The QP s_lock should be held.
45  */
46 static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
47 {
48         struct ipath_ibdev *dev;
49         u32 len;
50
51         len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) *
52                 ib_mtu_enum_to_int(qp->path_mtu);
53         qp->s_sge.sge = wqe->sg_list[0];
54         qp->s_sge.sg_list = wqe->sg_list + 1;
55         qp->s_sge.num_sge = wqe->wr.num_sge;
56         ipath_skip_sge(&qp->s_sge, len);
57         qp->s_len = wqe->length - len;
58         dev = to_idev(qp->ibqp.device);
59         spin_lock(&dev->pending_lock);
60         if (qp->timerwait.next == LIST_POISON1)
61                 list_add_tail(&qp->timerwait,
62                               &dev->pending[dev->pending_index]);
63         spin_unlock(&dev->pending_lock);
64 }
65
66 /**
67  * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
68  * @qp: a pointer to the QP
69  * @ohdr: a pointer to the IB header being constructed
70  * @pmtu: the path MTU
71  *
72  * Return bth0 if constructed; otherwise, return 0.
73  * Note the QP s_lock must be held.
74  */
75 static inline u32 ipath_make_rc_ack(struct ipath_qp *qp,
76                                     struct ipath_other_headers *ohdr,
77                                     u32 pmtu)
78 {
79         struct ipath_sge_state *ss;
80         u32 hwords;
81         u32 len;
82         u32 bth0;
83
84         /* header size in 32-bit words LRH+BTH = (8+12)/4. */
85         hwords = 5;
86
87         /*
88          * Send a response.  Note that we are in the responder's
89          * side of the QP context.
90          */
91         switch (qp->s_ack_state) {
92         case OP(RDMA_READ_REQUEST):
93                 ss = &qp->s_rdma_sge;
94                 len = qp->s_rdma_len;
95                 if (len > pmtu) {
96                         len = pmtu;
97                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
98                 }
99                 else
100                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
101                 qp->s_rdma_len -= len;
102                 bth0 = qp->s_ack_state << 24;
103                 ohdr->u.aeth = ipath_compute_aeth(qp);
104                 hwords++;
105                 break;
106
107         case OP(RDMA_READ_RESPONSE_FIRST):
108                 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
109                 /* FALLTHROUGH */
110         case OP(RDMA_READ_RESPONSE_MIDDLE):
111                 ss = &qp->s_rdma_sge;
112                 len = qp->s_rdma_len;
113                 if (len > pmtu)
114                         len = pmtu;
115                 else {
116                         ohdr->u.aeth = ipath_compute_aeth(qp);
117                         hwords++;
118                         qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
119                 }
120                 qp->s_rdma_len -= len;
121                 bth0 = qp->s_ack_state << 24;
122                 break;
123
124         case OP(RDMA_READ_RESPONSE_LAST):
125         case OP(RDMA_READ_RESPONSE_ONLY):
126                 /*
127                  * We have to prevent new requests from changing
128                  * the r_sge state while a ipath_verbs_send()
129                  * is in progress.
130                  * Changing r_state allows the receiver
131                  * to continue processing new packets.
132                  * We do it here now instead of above so
133                  * that we are sure the packet was sent before
134                  * changing the state.
135                  */
136                 qp->r_state = OP(RDMA_READ_RESPONSE_LAST);
137                 qp->s_ack_state = OP(ACKNOWLEDGE);
138                 return 0;
139
140         case OP(COMPARE_SWAP):
141         case OP(FETCH_ADD):
142                 ss = NULL;
143                 len = 0;
144                 qp->r_state = OP(SEND_LAST);
145                 qp->s_ack_state = OP(ACKNOWLEDGE);
146                 bth0 = IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
147                 ohdr->u.at.aeth = ipath_compute_aeth(qp);
148                 ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
149                 hwords += sizeof(ohdr->u.at) / 4;
150                 break;
151
152         default:
153                 /* Send a regular ACK. */
154                 ss = NULL;
155                 len = 0;
156                 qp->s_ack_state = OP(ACKNOWLEDGE);
157                 bth0 = qp->s_ack_state << 24;
158                 ohdr->u.aeth = ipath_compute_aeth(qp);
159                 hwords++;
160         }
161         qp->s_hdrwords = hwords;
162         qp->s_cur_sge = ss;
163         qp->s_cur_size = len;
164
165         return bth0;
166 }
167
168 /**
169  * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
170  * @qp: a pointer to the QP
171  * @ohdr: a pointer to the IB header being constructed
172  * @pmtu: the path MTU
173  * @bth0p: pointer to the BTH opcode word
174  * @bth2p: pointer to the BTH PSN word
175  *
176  * Return 1 if constructed; otherwise, return 0.
177  * Note the QP s_lock must be held.
178  */
179 static inline int ipath_make_rc_req(struct ipath_qp *qp,
180                                     struct ipath_other_headers *ohdr,
181                                     u32 pmtu, u32 *bth0p, u32 *bth2p)
182 {
183         struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
184         struct ipath_sge_state *ss;
185         struct ipath_swqe *wqe;
186         u32 hwords;
187         u32 len;
188         u32 bth0;
189         u32 bth2;
190         char newreq;
191
192         if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
193             qp->s_rnr_timeout)
194                 goto done;
195
196         /* header size in 32-bit words LRH+BTH = (8+12)/4. */
197         hwords = 5;
198         bth0 = 0;
199
200         /* Send a request. */
201         wqe = get_swqe_ptr(qp, qp->s_cur);
202         switch (qp->s_state) {
203         default:
204                 /*
205                  * Resend an old request or start a new one.
206                  *
207                  * We keep track of the current SWQE so that
208                  * we don't reset the "furthest progress" state
209                  * if we need to back up.
210                  */
211                 newreq = 0;
212                 if (qp->s_cur == qp->s_tail) {
213                         /* Check if send work queue is empty. */
214                         if (qp->s_tail == qp->s_head)
215                                 goto done;
216                         qp->s_psn = wqe->psn = qp->s_next_psn;
217                         newreq = 1;
218                 }
219                 /*
220                  * Note that we have to be careful not to modify the
221                  * original work request since we may need to resend
222                  * it.
223                  */
224                 qp->s_sge.sge = wqe->sg_list[0];
225                 qp->s_sge.sg_list = wqe->sg_list + 1;
226                 qp->s_sge.num_sge = wqe->wr.num_sge;
227                 qp->s_len = len = wqe->length;
228                 ss = &qp->s_sge;
229                 bth2 = 0;
230                 switch (wqe->wr.opcode) {
231                 case IB_WR_SEND:
232                 case IB_WR_SEND_WITH_IMM:
233                         /* If no credit, return. */
234                         if (qp->s_lsn != (u32) -1 &&
235                             ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
236                                 goto done;
237                         wqe->lpsn = wqe->psn;
238                         if (len > pmtu) {
239                                 wqe->lpsn += (len - 1) / pmtu;
240                                 qp->s_state = OP(SEND_FIRST);
241                                 len = pmtu;
242                                 break;
243                         }
244                         if (wqe->wr.opcode == IB_WR_SEND)
245                                 qp->s_state = OP(SEND_ONLY);
246                         else {
247                                 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
248                                 /* Immediate data comes after the BTH */
249                                 ohdr->u.imm_data = wqe->wr.imm_data;
250                                 hwords += 1;
251                         }
252                         if (wqe->wr.send_flags & IB_SEND_SOLICITED)
253                                 bth0 |= 1 << 23;
254                         bth2 = 1 << 31; /* Request ACK. */
255                         if (++qp->s_cur == qp->s_size)
256                                 qp->s_cur = 0;
257                         break;
258
259                 case IB_WR_RDMA_WRITE:
260                         if (newreq)
261                                 qp->s_lsn++;
262                         /* FALLTHROUGH */
263                 case IB_WR_RDMA_WRITE_WITH_IMM:
264                         /* If no credit, return. */
265                         if (qp->s_lsn != (u32) -1 &&
266                             ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
267                                 goto done;
268                         ohdr->u.rc.reth.vaddr =
269                                 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
270                         ohdr->u.rc.reth.rkey =
271                                 cpu_to_be32(wqe->wr.wr.rdma.rkey);
272                         ohdr->u.rc.reth.length = cpu_to_be32(len);
273                         hwords += sizeof(struct ib_reth) / 4;
274                         wqe->lpsn = wqe->psn;
275                         if (len > pmtu) {
276                                 wqe->lpsn += (len - 1) / pmtu;
277                                 qp->s_state = OP(RDMA_WRITE_FIRST);
278                                 len = pmtu;
279                                 break;
280                         }
281                         if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
282                                 qp->s_state = OP(RDMA_WRITE_ONLY);
283                         else {
284                                 qp->s_state =
285                                         OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
286                                 /* Immediate data comes
287                                  * after RETH */
288                                 ohdr->u.rc.imm_data = wqe->wr.imm_data;
289                                 hwords += 1;
290                                 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
291                                         bth0 |= 1 << 23;
292                         }
293                         bth2 = 1 << 31; /* Request ACK. */
294                         if (++qp->s_cur == qp->s_size)
295                                 qp->s_cur = 0;
296                         break;
297
298                 case IB_WR_RDMA_READ:
299                         ohdr->u.rc.reth.vaddr =
300                                 cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
301                         ohdr->u.rc.reth.rkey =
302                                 cpu_to_be32(wqe->wr.wr.rdma.rkey);
303                         ohdr->u.rc.reth.length = cpu_to_be32(len);
304                         qp->s_state = OP(RDMA_READ_REQUEST);
305                         hwords += sizeof(ohdr->u.rc.reth) / 4;
306                         if (newreq) {
307                                 qp->s_lsn++;
308                                 /*
309                                  * Adjust s_next_psn to count the
310                                  * expected number of responses.
311                                  */
312                                 if (len > pmtu)
313                                         qp->s_next_psn += (len - 1) / pmtu;
314                                 wqe->lpsn = qp->s_next_psn++;
315                         }
316                         ss = NULL;
317                         len = 0;
318                         if (++qp->s_cur == qp->s_size)
319                                 qp->s_cur = 0;
320                         break;
321
322                 case IB_WR_ATOMIC_CMP_AND_SWP:
323                 case IB_WR_ATOMIC_FETCH_AND_ADD:
324                         if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP)
325                                 qp->s_state = OP(COMPARE_SWAP);
326                         else
327                                 qp->s_state = OP(FETCH_ADD);
328                         ohdr->u.atomic_eth.vaddr = cpu_to_be64(
329                                 wqe->wr.wr.atomic.remote_addr);
330                         ohdr->u.atomic_eth.rkey = cpu_to_be32(
331                                 wqe->wr.wr.atomic.rkey);
332                         ohdr->u.atomic_eth.swap_data = cpu_to_be64(
333                                 wqe->wr.wr.atomic.swap);
334                         ohdr->u.atomic_eth.compare_data = cpu_to_be64(
335                                 wqe->wr.wr.atomic.compare_add);
336                         hwords += sizeof(struct ib_atomic_eth) / 4;
337                         if (newreq) {
338                                 qp->s_lsn++;
339                                 wqe->lpsn = wqe->psn;
340                         }
341                         if (++qp->s_cur == qp->s_size)
342                                 qp->s_cur = 0;
343                         ss = NULL;
344                         len = 0;
345                         break;
346
347                 default:
348                         goto done;
349                 }
350                 if (newreq) {
351                         qp->s_tail++;
352                         if (qp->s_tail >= qp->s_size)
353                                 qp->s_tail = 0;
354                 }
355                 bth2 |= qp->s_psn++ & IPS_PSN_MASK;
356                 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
357                         qp->s_next_psn = qp->s_psn;
358                 spin_lock(&dev->pending_lock);
359                 if (qp->timerwait.next == LIST_POISON1)
360                         list_add_tail(&qp->timerwait,
361                                       &dev->pending[dev->pending_index]);
362                 spin_unlock(&dev->pending_lock);
363                 break;
364
365         case OP(RDMA_READ_RESPONSE_FIRST):
366                 /*
367                  * This case can only happen if a send is restarted.  See
368                  * ipath_restart_rc().
369                  */
370                 ipath_init_restart(qp, wqe);
371                 /* FALLTHROUGH */
372         case OP(SEND_FIRST):
373                 qp->s_state = OP(SEND_MIDDLE);
374                 /* FALLTHROUGH */
375         case OP(SEND_MIDDLE):
376                 bth2 = qp->s_psn++ & IPS_PSN_MASK;
377                 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
378                         qp->s_next_psn = qp->s_psn;
379                 ss = &qp->s_sge;
380                 len = qp->s_len;
381                 if (len > pmtu) {
382                         /*
383                          * Request an ACK every 1/2 MB to avoid retransmit
384                          * timeouts.
385                          */
386                         if (((wqe->length - len) % (512 * 1024)) == 0)
387                                 bth2 |= 1 << 31;
388                         len = pmtu;
389                         break;
390                 }
391                 if (wqe->wr.opcode == IB_WR_SEND)
392                         qp->s_state = OP(SEND_LAST);
393                 else {
394                         qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
395                         /* Immediate data comes after the BTH */
396                         ohdr->u.imm_data = wqe->wr.imm_data;
397                         hwords += 1;
398                 }
399                 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
400                         bth0 |= 1 << 23;
401                 bth2 |= 1 << 31;        /* Request ACK. */
402                 qp->s_cur++;
403                 if (qp->s_cur >= qp->s_size)
404                         qp->s_cur = 0;
405                 break;
406
407         case OP(RDMA_READ_RESPONSE_LAST):
408                 /*
409                  * This case can only happen if a RDMA write is restarted.
410                  * See ipath_restart_rc().
411                  */
412                 ipath_init_restart(qp, wqe);
413                 /* FALLTHROUGH */
414         case OP(RDMA_WRITE_FIRST):
415                 qp->s_state = OP(RDMA_WRITE_MIDDLE);
416                 /* FALLTHROUGH */
417         case OP(RDMA_WRITE_MIDDLE):
418                 bth2 = qp->s_psn++ & IPS_PSN_MASK;
419                 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
420                         qp->s_next_psn = qp->s_psn;
421                 ss = &qp->s_sge;
422                 len = qp->s_len;
423                 if (len > pmtu) {
424                         /*
425                          * Request an ACK every 1/2 MB to avoid retransmit
426                          * timeouts.
427                          */
428                         if (((wqe->length - len) % (512 * 1024)) == 0)
429                                 bth2 |= 1 << 31;
430                         len = pmtu;
431                         break;
432                 }
433                 if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
434                         qp->s_state = OP(RDMA_WRITE_LAST);
435                 else {
436                         qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
437                         /* Immediate data comes after the BTH */
438                         ohdr->u.imm_data = wqe->wr.imm_data;
439                         hwords += 1;
440                         if (wqe->wr.send_flags & IB_SEND_SOLICITED)
441                                 bth0 |= 1 << 23;
442                 }
443                 bth2 |= 1 << 31;        /* Request ACK. */
444                 qp->s_cur++;
445                 if (qp->s_cur >= qp->s_size)
446                         qp->s_cur = 0;
447                 break;
448
449         case OP(RDMA_READ_RESPONSE_MIDDLE):
450                 /*
451                  * This case can only happen if a RDMA read is restarted.
452                  * See ipath_restart_rc().
453                  */
454                 ipath_init_restart(qp, wqe);
455                 len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) * pmtu;
456                 ohdr->u.rc.reth.vaddr =
457                         cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
458                 ohdr->u.rc.reth.rkey =
459                         cpu_to_be32(wqe->wr.wr.rdma.rkey);
460                 ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
461                 qp->s_state = OP(RDMA_READ_REQUEST);
462                 hwords += sizeof(ohdr->u.rc.reth) / 4;
463                 bth2 = qp->s_psn++ & IPS_PSN_MASK;
464                 if ((int)(qp->s_psn - qp->s_next_psn) > 0)
465                         qp->s_next_psn = qp->s_psn;
466                 ss = NULL;
467                 len = 0;
468                 qp->s_cur++;
469                 if (qp->s_cur == qp->s_size)
470                         qp->s_cur = 0;
471                 break;
472
473         case OP(RDMA_READ_REQUEST):
474         case OP(COMPARE_SWAP):
475         case OP(FETCH_ADD):
476                 /*
477                  * We shouldn't start anything new until this request is
478                  * finished.  The ACK will handle rescheduling us.  XXX The
479                  * number of outstanding ones is negotiated at connection
480                  * setup time (see pg. 258,289)?  XXX Also, if we support
481                  * multiple outstanding requests, we need to check the WQE
482                  * IB_SEND_FENCE flag and not send a new request if a RDMA
483                  * read or atomic is pending.
484                  */
485                 goto done;
486         }
487         qp->s_len -= len;
488         qp->s_hdrwords = hwords;
489         qp->s_cur_sge = ss;
490         qp->s_cur_size = len;
491         *bth0p = bth0 | (qp->s_state << 24);
492         *bth2p = bth2;
493         return 1;
494
495 done:
496         return 0;
497 }
498
499 static inline void ipath_make_rc_grh(struct ipath_qp *qp,
500                                      struct ib_global_route *grh,
501                                      u32 nwords)
502 {
503         struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
504
505         /* GRH header size in 32-bit words. */
506         qp->s_hdrwords += 10;
507         qp->s_hdr.u.l.grh.version_tclass_flow =
508                 cpu_to_be32((6 << 28) |
509                             (grh->traffic_class << 20) |
510                             grh->flow_label);
511         qp->s_hdr.u.l.grh.paylen =
512                 cpu_to_be16(((qp->s_hdrwords - 12) + nwords +
513                              SIZE_OF_CRC) << 2);
514         /* next_hdr is defined by C8-7 in ch. 8.4.1 */
515         qp->s_hdr.u.l.grh.next_hdr = 0x1B;
516         qp->s_hdr.u.l.grh.hop_limit = grh->hop_limit;
517         /* The SGID is 32-bit aligned. */
518         qp->s_hdr.u.l.grh.sgid.global.subnet_prefix = dev->gid_prefix;
519         qp->s_hdr.u.l.grh.sgid.global.interface_id =
520                 ipath_layer_get_guid(dev->dd);
521         qp->s_hdr.u.l.grh.dgid = grh->dgid;
522 }
523
524 /**
525  * ipath_do_rc_send - perform a send on an RC QP
526  * @data: contains a pointer to the QP
527  *
528  * Process entries in the send work queue until credit or queue is
529  * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
530  * Otherwise, after we drop the QP s_lock, two threads could send
531  * packets out of order.
532  */
533 void ipath_do_rc_send(unsigned long data)
534 {
535         struct ipath_qp *qp = (struct ipath_qp *)data;
536         struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
537         unsigned long flags;
538         u16 lrh0;
539         u32 nwords;
540         u32 extra_bytes;
541         u32 bth0;
542         u32 bth2;
543         u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
544         struct ipath_other_headers *ohdr;
545
546         if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
547                 goto bail;
548
549         if (unlikely(qp->remote_ah_attr.dlid ==
550                      ipath_layer_get_lid(dev->dd))) {
551                 struct ib_wc wc;
552
553                 /*
554                  * Pass in an uninitialized ib_wc to be consistent with
555                  * other places where ipath_ruc_loopback() is called.
556                  */
557                 ipath_ruc_loopback(qp, &wc);
558                 goto clear;
559         }
560
561         ohdr = &qp->s_hdr.u.oth;
562         if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
563                 ohdr = &qp->s_hdr.u.l.oth;
564
565 again:
566         /* Check for a constructed packet to be sent. */
567         if (qp->s_hdrwords != 0) {
568                 /*
569                  * If no PIO bufs are available, return.  An interrupt will
570                  * call ipath_ib_piobufavail() when one is available.
571                  */
572                 _VERBS_INFO("h %u %p\n", qp->s_hdrwords, &qp->s_hdr);
573                 _VERBS_INFO("d %u %p %u %p %u %u %u %u\n", qp->s_cur_size,
574                             qp->s_cur_sge->sg_list,
575                             qp->s_cur_sge->num_sge,
576                             qp->s_cur_sge->sge.vaddr,
577                             qp->s_cur_sge->sge.sge_length,
578                             qp->s_cur_sge->sge.length,
579                             qp->s_cur_sge->sge.m,
580                             qp->s_cur_sge->sge.n);
581                 if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
582                                      (u32 *) &qp->s_hdr, qp->s_cur_size,
583                                      qp->s_cur_sge)) {
584                         ipath_no_bufs_available(qp, dev);
585                         goto bail;
586                 }
587                 dev->n_unicast_xmit++;
588                 /* Record that we sent the packet and s_hdr is empty. */
589                 qp->s_hdrwords = 0;
590         }
591
592         /*
593          * The lock is needed to synchronize between setting
594          * qp->s_ack_state, resend timer, and post_send().
595          */
596         spin_lock_irqsave(&qp->s_lock, flags);
597
598         /* Sending responses has higher priority over sending requests. */
599         if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
600             (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
601                 bth2 = qp->s_ack_psn++ & IPS_PSN_MASK;
602         else if (!ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2))
603                 goto done;
604
605         spin_unlock_irqrestore(&qp->s_lock, flags);
606
607         /* Construct the header. */
608         extra_bytes = (4 - qp->s_cur_size) & 3;
609         nwords = (qp->s_cur_size + extra_bytes) >> 2;
610         lrh0 = IPS_LRH_BTH;
611         if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
612                 ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, nwords);
613                 lrh0 = IPS_LRH_GRH;
614         }
615         lrh0 |= qp->remote_ah_attr.sl << 4;
616         qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
617         qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
618         qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
619                                        SIZE_OF_CRC);
620         qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
621         bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
622         bth0 |= extra_bytes << 20;
623         ohdr->bth[0] = cpu_to_be32(bth0);
624         ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
625         ohdr->bth[2] = cpu_to_be32(bth2);
626
627         /* Check for more work to do. */
628         goto again;
629
630 done:
631         spin_unlock_irqrestore(&qp->s_lock, flags);
632 clear:
633         clear_bit(IPATH_S_BUSY, &qp->s_flags);
634 bail:
635         return;
636 }
637
638 static void send_rc_ack(struct ipath_qp *qp)
639 {
640         struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
641         u16 lrh0;
642         u32 bth0;
643         struct ipath_other_headers *ohdr;
644
645         /* Construct the header. */
646         ohdr = &qp->s_hdr.u.oth;
647         lrh0 = IPS_LRH_BTH;
648         /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
649         qp->s_hdrwords = 6;
650         if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
651                 ipath_make_rc_grh(qp, &qp->remote_ah_attr.grh, 0);
652                 ohdr = &qp->s_hdr.u.l.oth;
653                 lrh0 = IPS_LRH_GRH;
654         }
655         bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
656         ohdr->u.aeth = ipath_compute_aeth(qp);
657         if (qp->s_ack_state >= OP(COMPARE_SWAP)) {
658                 bth0 |= IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
659                 ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
660                 qp->s_hdrwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
661         }
662         else
663                 bth0 |= OP(ACKNOWLEDGE) << 24;
664         lrh0 |= qp->remote_ah_attr.sl << 4;
665         qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
666         qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
667         qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + SIZE_OF_CRC);
668         qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
669         ohdr->bth[0] = cpu_to_be32(bth0);
670         ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
671         ohdr->bth[2] = cpu_to_be32(qp->s_ack_psn & IPS_PSN_MASK);
672
673         /*
674          * If we can send the ACK, clear the ACK state.
675          */
676         if (ipath_verbs_send(dev->dd, qp->s_hdrwords, (u32 *) &qp->s_hdr,
677                              0, NULL) == 0) {
678                 qp->s_ack_state = OP(ACKNOWLEDGE);
679                 dev->n_rc_qacks++;
680                 dev->n_unicast_xmit++;
681         }
682 }
683
684 /**
685  * ipath_restart_rc - back up requester to resend the last un-ACKed request
686  * @qp: the QP to restart
687  * @psn: packet sequence number for the request
688  * @wc: the work completion request
689  *
690  * The QP s_lock should be held.
691  */
692 void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
693 {
694         struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
695         struct ipath_ibdev *dev;
696         u32 n;
697
698         /*
699          * If there are no requests pending, we are done.
700          */
701         if (ipath_cmp24(psn, qp->s_next_psn) >= 0 ||
702             qp->s_last == qp->s_tail)
703                 goto done;
704
705         if (qp->s_retry == 0) {
706                 wc->wr_id = wqe->wr.wr_id;
707                 wc->status = IB_WC_RETRY_EXC_ERR;
708                 wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
709                 wc->vendor_err = 0;
710                 wc->byte_len = 0;
711                 wc->qp_num = qp->ibqp.qp_num;
712                 wc->src_qp = qp->remote_qpn;
713                 wc->pkey_index = 0;
714                 wc->slid = qp->remote_ah_attr.dlid;
715                 wc->sl = qp->remote_ah_attr.sl;
716                 wc->dlid_path_bits = 0;
717                 wc->port_num = 0;
718                 ipath_sqerror_qp(qp, wc);
719                 goto bail;
720         }
721         qp->s_retry--;
722
723         /*
724          * Remove the QP from the timeout queue.
725          * Note: it may already have been removed by ipath_ib_timer().
726          */
727         dev = to_idev(qp->ibqp.device);
728         spin_lock(&dev->pending_lock);
729         if (qp->timerwait.next != LIST_POISON1)
730                 list_del(&qp->timerwait);
731         spin_unlock(&dev->pending_lock);
732
733         if (wqe->wr.opcode == IB_WR_RDMA_READ)
734                 dev->n_rc_resends++;
735         else
736                 dev->n_rc_resends += (int)qp->s_psn - (int)psn;
737
738         /*
739          * If we are starting the request from the beginning, let the normal
740          * send code handle initialization.
741          */
742         qp->s_cur = qp->s_last;
743         if (ipath_cmp24(psn, wqe->psn) <= 0) {
744                 qp->s_state = OP(SEND_LAST);
745                 qp->s_psn = wqe->psn;
746         } else {
747                 n = qp->s_cur;
748                 for (;;) {
749                         if (++n == qp->s_size)
750                                 n = 0;
751                         if (n == qp->s_tail) {
752                                 if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
753                                         qp->s_cur = n;
754                                         wqe = get_swqe_ptr(qp, n);
755                                 }
756                                 break;
757                         }
758                         wqe = get_swqe_ptr(qp, n);
759                         if (ipath_cmp24(psn, wqe->psn) < 0)
760                                 break;
761                         qp->s_cur = n;
762                 }
763                 qp->s_psn = psn;
764
765                 /*
766                  * Reset the state to restart in the middle of a request.
767                  * Don't change the s_sge, s_cur_sge, or s_cur_size.
768                  * See ipath_do_rc_send().
769                  */
770                 switch (wqe->wr.opcode) {
771                 case IB_WR_SEND:
772                 case IB_WR_SEND_WITH_IMM:
773                         qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
774                         break;
775
776                 case IB_WR_RDMA_WRITE:
777                 case IB_WR_RDMA_WRITE_WITH_IMM:
778                         qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
779                         break;
780
781                 case IB_WR_RDMA_READ:
782                         qp->s_state =
783                                 OP(RDMA_READ_RESPONSE_MIDDLE);
784                         break;
785
786                 default:
787                         /*
788                          * This case shouldn't happen since its only
789                          * one PSN per req.
790                          */
791                         qp->s_state = OP(SEND_LAST);
792                 }
793         }
794
795 done:
796         tasklet_hi_schedule(&qp->s_task);
797
798 bail:
799         return;
800 }
801
802 /**
803  * reset_psn - reset the QP state to send starting from PSN
804  * @qp: the QP
805  * @psn: the packet sequence number to restart at
806  *
807  * This is called from ipath_rc_rcv() to process an incoming RC ACK
808  * for the given QP.
809  * Called at interrupt level with the QP s_lock held.
810  */
811 static void reset_psn(struct ipath_qp *qp, u32 psn)
812 {
813         struct ipath_swqe *wqe;
814         u32 n;
815
816         n = qp->s_cur;
817         wqe = get_swqe_ptr(qp, n);
818         for (;;) {
819                 if (++n == qp->s_size)
820                         n = 0;
821                 if (n == qp->s_tail) {
822                         if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
823                                 qp->s_cur = n;
824                                 wqe = get_swqe_ptr(qp, n);
825                         }
826                         break;
827                 }
828                 wqe = get_swqe_ptr(qp, n);
829                 if (ipath_cmp24(psn, wqe->psn) < 0)
830                         break;
831                 qp->s_cur = n;
832         }
833         qp->s_psn = psn;
834
835         /*
836          * Set the state to restart in the middle of a
837          * request.  Don't change the s_sge, s_cur_sge, or
838          * s_cur_size.  See ipath_do_rc_send().
839          */
840         switch (wqe->wr.opcode) {
841         case IB_WR_SEND:
842         case IB_WR_SEND_WITH_IMM:
843                 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
844                 break;
845
846         case IB_WR_RDMA_WRITE:
847         case IB_WR_RDMA_WRITE_WITH_IMM:
848                 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
849                 break;
850
851         case IB_WR_RDMA_READ:
852                 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
853                 break;
854
855         default:
856                 /*
857                  * This case shouldn't happen since its only
858                  * one PSN per req.
859                  */
860                 qp->s_state = OP(SEND_LAST);
861         }
862 }
863
864 /**
865  * do_rc_ack - process an incoming RC ACK
866  * @qp: the QP the ACK came in on
867  * @psn: the packet sequence number of the ACK
868  * @opcode: the opcode of the request that resulted in the ACK
869  *
870  * This is called from ipath_rc_rcv() to process an incoming RC ACK
871  * for the given QP.
872  * Called at interrupt level with the QP s_lock held.
873  * Returns 1 if OK, 0 if current operation should be aborted (NAK).
874  */
875 static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
876 {
877         struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
878         struct ib_wc wc;
879         struct ipath_swqe *wqe;
880         int ret = 0;
881
882         /*
883          * Remove the QP from the timeout queue (or RNR timeout queue).
884          * If ipath_ib_timer() has already removed it,
885          * it's OK since we hold the QP s_lock and ipath_restart_rc()
886          * just won't find anything to restart if we ACK everything.
887          */
888         spin_lock(&dev->pending_lock);
889         if (qp->timerwait.next != LIST_POISON1)
890                 list_del(&qp->timerwait);
891         spin_unlock(&dev->pending_lock);
892
893         /*
894          * Note that NAKs implicitly ACK outstanding SEND and RDMA write
895          * requests and implicitly NAK RDMA read and atomic requests issued
896          * before the NAK'ed request.  The MSN won't include the NAK'ed
897          * request but will include an ACK'ed request(s).
898          */
899         wqe = get_swqe_ptr(qp, qp->s_last);
900
901         /* Nothing is pending to ACK/NAK. */
902         if (qp->s_last == qp->s_tail)
903                 goto bail;
904
905         /*
906          * The MSN might be for a later WQE than the PSN indicates so
907          * only complete WQEs that the PSN finishes.
908          */
909         while (ipath_cmp24(psn, wqe->lpsn) >= 0) {
910                 /* If we are ACKing a WQE, the MSN should be >= the SSN. */
911                 if (ipath_cmp24(aeth, wqe->ssn) < 0)
912                         break;
913                 /*
914                  * If this request is a RDMA read or atomic, and the ACK is
915                  * for a later operation, this ACK NAKs the RDMA read or
916                  * atomic.  In other words, only a RDMA_READ_LAST or ONLY
917                  * can ACK a RDMA read and likewise for atomic ops.  Note
918                  * that the NAK case can only happen if relaxed ordering is
919                  * used and requests are sent after an RDMA read or atomic
920                  * is sent but before the response is received.
921                  */
922                 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
923                      opcode != OP(RDMA_READ_RESPONSE_LAST)) ||
924                     ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
925                       wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
926                      (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
927                       ipath_cmp24(wqe->psn, psn) != 0))) {
928                         /*
929                          * The last valid PSN seen is the previous
930                          * request's.
931                          */
932                         qp->s_last_psn = wqe->psn - 1;
933                         /* Retry this request. */
934                         ipath_restart_rc(qp, wqe->psn, &wc);
935                         /*
936                          * No need to process the ACK/NAK since we are
937                          * restarting an earlier request.
938                          */
939                         goto bail;
940                 }
941                 /* Post a send completion queue entry if requested. */
942                 if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
943                     (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
944                         wc.wr_id = wqe->wr.wr_id;
945                         wc.status = IB_WC_SUCCESS;
946                         wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
947                         wc.vendor_err = 0;
948                         wc.byte_len = wqe->length;
949                         wc.qp_num = qp->ibqp.qp_num;
950                         wc.src_qp = qp->remote_qpn;
951                         wc.pkey_index = 0;
952                         wc.slid = qp->remote_ah_attr.dlid;
953                         wc.sl = qp->remote_ah_attr.sl;
954                         wc.dlid_path_bits = 0;
955                         wc.port_num = 0;
956                         ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
957                 }
958                 qp->s_retry = qp->s_retry_cnt;
959                 /*
960                  * If we are completing a request which is in the process of
961                  * being resent, we can stop resending it since we know the
962                  * responder has already seen it.
963                  */
964                 if (qp->s_last == qp->s_cur) {
965                         if (++qp->s_cur >= qp->s_size)
966                                 qp->s_cur = 0;
967                         wqe = get_swqe_ptr(qp, qp->s_cur);
968                         qp->s_state = OP(SEND_LAST);
969                         qp->s_psn = wqe->psn;
970                 }
971                 if (++qp->s_last >= qp->s_size)
972                         qp->s_last = 0;
973                 wqe = get_swqe_ptr(qp, qp->s_last);
974                 if (qp->s_last == qp->s_tail)
975                         break;
976         }
977
978         switch (aeth >> 29) {
979         case 0:         /* ACK */
980                 dev->n_rc_acks++;
981                 /* If this is a partial ACK, reset the retransmit timer. */
982                 if (qp->s_last != qp->s_tail) {
983                         spin_lock(&dev->pending_lock);
984                         list_add_tail(&qp->timerwait,
985                                       &dev->pending[dev->pending_index]);
986                         spin_unlock(&dev->pending_lock);
987                 }
988                 ipath_get_credit(qp, aeth);
989                 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
990                 qp->s_retry = qp->s_retry_cnt;
991                 qp->s_last_psn = psn;
992                 ret = 1;
993                 goto bail;
994
995         case 1:         /* RNR NAK */
996                 dev->n_rnr_naks++;
997                 if (qp->s_rnr_retry == 0) {
998                         if (qp->s_last == qp->s_tail)
999                                 goto bail;
1000
1001                         wc.status = IB_WC_RNR_RETRY_EXC_ERR;
1002                         goto class_b;
1003                 }
1004                 if (qp->s_rnr_retry_cnt < 7)
1005                         qp->s_rnr_retry--;
1006                 if (qp->s_last == qp->s_tail)
1007                         goto bail;
1008
1009                 /* The last valid PSN seen is the previous request's. */
1010                 qp->s_last_psn = wqe->psn - 1;
1011
1012                 dev->n_rc_resends += (int)qp->s_psn - (int)psn;
1013
1014                 /*
1015                  * If we are starting the request from the beginning, let
1016                  * the normal send code handle initialization.
1017                  */
1018                 qp->s_cur = qp->s_last;
1019                 wqe = get_swqe_ptr(qp, qp->s_cur);
1020                 if (ipath_cmp24(psn, wqe->psn) <= 0) {
1021                         qp->s_state = OP(SEND_LAST);
1022                         qp->s_psn = wqe->psn;
1023                 } else
1024                         reset_psn(qp, psn);
1025
1026                 qp->s_rnr_timeout =
1027                         ib_ipath_rnr_table[(aeth >> IPS_AETH_CREDIT_SHIFT) &
1028                                            IPS_AETH_CREDIT_MASK];
1029                 ipath_insert_rnr_queue(qp);
1030                 goto bail;
1031
1032         case 3:         /* NAK */
1033                 /* The last valid PSN seen is the previous request's. */
1034                 if (qp->s_last != qp->s_tail)
1035                         qp->s_last_psn = wqe->psn - 1;
1036                 switch ((aeth >> IPS_AETH_CREDIT_SHIFT) &
1037                         IPS_AETH_CREDIT_MASK) {
1038                 case 0: /* PSN sequence error */
1039                         dev->n_seq_naks++;
1040                         /*
1041                          * Back up to the responder's expected PSN.  XXX
1042                          * Note that we might get a NAK in the middle of an
1043                          * RDMA READ response which terminates the RDMA
1044                          * READ.
1045                          */
1046                         if (qp->s_last == qp->s_tail)
1047                                 break;
1048
1049                         if (ipath_cmp24(psn, wqe->psn) < 0)
1050                                 break;
1051
1052                         /* Retry the request. */
1053                         ipath_restart_rc(qp, psn, &wc);
1054                         break;
1055
1056                 case 1: /* Invalid Request */
1057                         wc.status = IB_WC_REM_INV_REQ_ERR;
1058                         dev->n_other_naks++;
1059                         goto class_b;
1060
1061                 case 2: /* Remote Access Error */
1062                         wc.status = IB_WC_REM_ACCESS_ERR;
1063                         dev->n_other_naks++;
1064                         goto class_b;
1065
1066                 case 3: /* Remote Operation Error */
1067                         wc.status = IB_WC_REM_OP_ERR;
1068                         dev->n_other_naks++;
1069                 class_b:
1070                         wc.wr_id = wqe->wr.wr_id;
1071                         wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
1072                         wc.vendor_err = 0;
1073                         wc.byte_len = 0;
1074                         wc.qp_num = qp->ibqp.qp_num;
1075                         wc.src_qp = qp->remote_qpn;
1076                         wc.pkey_index = 0;
1077                         wc.slid = qp->remote_ah_attr.dlid;
1078                         wc.sl = qp->remote_ah_attr.sl;
1079                         wc.dlid_path_bits = 0;
1080                         wc.port_num = 0;
1081                         ipath_sqerror_qp(qp, &wc);
1082                         break;
1083
1084                 default:
1085                         /* Ignore other reserved NAK error codes */
1086                         goto reserved;
1087                 }
1088                 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1089                 goto bail;
1090
1091         default:                /* 2: reserved */
1092         reserved:
1093                 /* Ignore reserved NAK codes. */
1094                 goto bail;
1095         }
1096
1097 bail:
1098         return ret;
1099 }
1100
1101 /**
1102  * ipath_rc_rcv_resp - process an incoming RC response packet
1103  * @dev: the device this packet came in on
1104  * @ohdr: the other headers for this packet
1105  * @data: the packet data
1106  * @tlen: the packet length
1107  * @qp: the QP for this packet
1108  * @opcode: the opcode for this packet
1109  * @psn: the packet sequence number for this packet
1110  * @hdrsize: the header length
1111  * @pmtu: the path MTU
1112  * @header_in_data: true if part of the header data is in the data buffer
1113  *
1114  * This is called from ipath_rc_rcv() to process an incoming RC response
1115  * packet for the given QP.
1116  * Called at interrupt level.
1117  */
1118 static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
1119                                      struct ipath_other_headers *ohdr,
1120                                      void *data, u32 tlen,
1121                                      struct ipath_qp *qp,
1122                                      u32 opcode,
1123                                      u32 psn, u32 hdrsize, u32 pmtu,
1124                                      int header_in_data)
1125 {
1126         unsigned long flags;
1127         struct ib_wc wc;
1128         int diff;
1129         u32 pad;
1130         u32 aeth;
1131
1132         spin_lock_irqsave(&qp->s_lock, flags);
1133
1134         /* Ignore invalid responses. */
1135         if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
1136                 goto ack_done;
1137
1138         /* Ignore duplicate responses. */
1139         diff = ipath_cmp24(psn, qp->s_last_psn);
1140         if (unlikely(diff <= 0)) {
1141                 /* Update credits for "ghost" ACKs */
1142                 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1143                         if (!header_in_data)
1144                                 aeth = be32_to_cpu(ohdr->u.aeth);
1145                         else {
1146                                 aeth = be32_to_cpu(((__be32 *) data)[0]);
1147                                 data += sizeof(__be32);
1148                         }
1149                         if ((aeth >> 29) == 0)
1150                                 ipath_get_credit(qp, aeth);
1151                 }
1152                 goto ack_done;
1153         }
1154
1155         switch (opcode) {
1156         case OP(ACKNOWLEDGE):
1157         case OP(ATOMIC_ACKNOWLEDGE):
1158         case OP(RDMA_READ_RESPONSE_FIRST):
1159                 if (!header_in_data)
1160                         aeth = be32_to_cpu(ohdr->u.aeth);
1161                 else {
1162                         aeth = be32_to_cpu(((__be32 *) data)[0]);
1163                         data += sizeof(__be32);
1164                 }
1165                 if (opcode == OP(ATOMIC_ACKNOWLEDGE))
1166                         *(u64 *) qp->s_sge.sge.vaddr = *(u64 *) data;
1167                 if (!do_rc_ack(qp, aeth, psn, opcode) ||
1168                     opcode != OP(RDMA_READ_RESPONSE_FIRST))
1169                         goto ack_done;
1170                 hdrsize += 4;
1171                 /*
1172                  * do_rc_ack() has already checked the PSN so skip
1173                  * the sequence check.
1174                  */
1175                 goto rdma_read;
1176
1177         case OP(RDMA_READ_RESPONSE_MIDDLE):
1178                 /* no AETH, no ACK */
1179                 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
1180                         dev->n_rdma_seq++;
1181                         ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
1182                         goto ack_done;
1183                 }
1184         rdma_read:
1185         if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
1186                 goto ack_done;
1187         if (unlikely(tlen != (hdrsize + pmtu + 4)))
1188                 goto ack_done;
1189         if (unlikely(pmtu >= qp->s_len))
1190                 goto ack_done;
1191         /* We got a response so update the timeout. */
1192         if (unlikely(qp->s_last == qp->s_tail ||
1193                      get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
1194                      IB_WR_RDMA_READ))
1195                 goto ack_done;
1196         spin_lock(&dev->pending_lock);
1197         if (qp->s_rnr_timeout == 0 &&
1198             qp->timerwait.next != LIST_POISON1)
1199                 list_move_tail(&qp->timerwait,
1200                                &dev->pending[dev->pending_index]);
1201         spin_unlock(&dev->pending_lock);
1202         /*
1203          * Update the RDMA receive state but do the copy w/o holding the
1204          * locks and blocking interrupts.  XXX Yet another place that
1205          * affects relaxed RDMA order since we don't want s_sge modified.
1206          */
1207         qp->s_len -= pmtu;
1208         qp->s_last_psn = psn;
1209         spin_unlock_irqrestore(&qp->s_lock, flags);
1210         ipath_copy_sge(&qp->s_sge, data, pmtu);
1211         goto bail;
1212
1213         case OP(RDMA_READ_RESPONSE_LAST):
1214                 /* ACKs READ req. */
1215                 if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
1216                         dev->n_rdma_seq++;
1217                         ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
1218                         goto ack_done;
1219                 }
1220                 /* FALLTHROUGH */
1221         case OP(RDMA_READ_RESPONSE_ONLY):
1222                 if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
1223                         goto ack_done;
1224                 /*
1225                  * Get the number of bytes the message was padded by.
1226                  */
1227                 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1228                 /*
1229                  * Check that the data size is >= 1 && <= pmtu.
1230                  * Remember to account for the AETH header (4) and
1231                  * ICRC (4).
1232                  */
1233                 if (unlikely(tlen <= (hdrsize + pad + 8))) {
1234                         /*
1235                          * XXX Need to generate an error CQ
1236                          * entry.
1237                          */
1238                         goto ack_done;
1239                 }
1240                 tlen -= hdrsize + pad + 8;
1241                 if (unlikely(tlen != qp->s_len)) {
1242                         /*
1243                          * XXX Need to generate an error CQ
1244                          * entry.
1245                          */
1246                         goto ack_done;
1247                 }
1248                 if (!header_in_data)
1249                         aeth = be32_to_cpu(ohdr->u.aeth);
1250                 else {
1251                         aeth = be32_to_cpu(((__be32 *) data)[0]);
1252                         data += sizeof(__be32);
1253                 }
1254                 ipath_copy_sge(&qp->s_sge, data, tlen);
1255                 if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) {
1256                         /*
1257                          * Change the state so we contimue
1258                          * processing new requests.
1259                          */
1260                         qp->s_state = OP(SEND_LAST);
1261                 }
1262                 goto ack_done;
1263         }
1264
1265 ack_done:
1266         spin_unlock_irqrestore(&qp->s_lock, flags);
1267 bail:
1268         return;
1269 }
1270
1271 /**
1272  * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
1273  * @dev: the device this packet came in on
1274  * @ohdr: the other headers for this packet
1275  * @data: the packet data
1276  * @qp: the QP for this packet
1277  * @opcode: the opcode for this packet
1278  * @psn: the packet sequence number for this packet
1279  * @diff: the difference between the PSN and the expected PSN
1280  * @header_in_data: true if part of the header data is in the data buffer
1281  *
1282  * This is called from ipath_rc_rcv() to process an unexpected
1283  * incoming RC packet for the given QP.
1284  * Called at interrupt level.
1285  * Return 1 if no more processing is needed; otherwise return 0 to
1286  * schedule a response to be sent and the s_lock unlocked.
1287  */
1288 static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
1289                                      struct ipath_other_headers *ohdr,
1290                                      void *data,
1291                                      struct ipath_qp *qp,
1292                                      u32 opcode,
1293                                      u32 psn,
1294                                      int diff,
1295                                      int header_in_data)
1296 {
1297         struct ib_reth *reth;
1298
1299         if (diff > 0) {
1300                 /*
1301                  * Packet sequence error.
1302                  * A NAK will ACK earlier sends and RDMA writes.
1303                  * Don't queue the NAK if a RDMA read, atomic, or
1304                  * NAK is pending though.
1305                  */
1306                 spin_lock(&qp->s_lock);
1307                 if ((qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
1308                      qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) ||
1309                     qp->s_nak_state != 0) {
1310                         spin_unlock(&qp->s_lock);
1311                         goto done;
1312                 }
1313                 qp->s_ack_state = OP(SEND_ONLY);
1314                 qp->s_nak_state = IB_NAK_PSN_ERROR;
1315                 /* Use the expected PSN. */
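                     /* The requester will resend starting at this PSN. */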
1316                 qp->s_ack_psn = qp->r_psn;
1317                 goto resched;
1318         }
1319
1320         /*
1321          * Handle a duplicate request.  Don't re-execute SEND, RDMA
1322          * write or atomic op.  Don't NAK errors, just silently drop
1323          * the duplicate request.  Note that r_sge, r_len, and
1324          * r_rcv_len may be in use so don't modify them.
1325          *
1326          * We are supposed to ACK the earliest duplicate PSN but we
1327          * can coalesce an outstanding duplicate ACK.  We have to
1328          * send the earliest so that RDMA reads can be restarted at
1329          * the requester's expected PSN.
1330          */
1331         spin_lock(&qp->s_lock);
1332         if (qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE &&
1333             ipath_cmp24(psn, qp->s_ack_psn) >= 0) {
1334                 if (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST)
1335                         qp->s_ack_psn = psn;
1336                 spin_unlock(&qp->s_lock);
1337                 goto done;
1338         }
1339         switch (opcode) {
1340         case OP(RDMA_READ_REQUEST):
1341                 /*
1342                  * We have to be careful to not change s_rdma_sge
1343                  * while ipath_do_rc_send() is using it and not
1344                  * holding the s_lock.
1345                  */
1346                 if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
1347                     qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
1348                         spin_unlock(&qp->s_lock);
1349                         dev->n_rdma_dup_busy++;
1350                         goto done;
1351                 }
1352                 /* RETH comes after BTH */
1353                 if (!header_in_data)
1354                         reth = &ohdr->u.rc.reth;
1355                 else {
1356                         reth = (struct ib_reth *)data;
1357                         data += sizeof(*reth);
1358                 }
1359                 qp->s_rdma_len = be32_to_cpu(reth->length);
1360                 if (qp->s_rdma_len != 0) {
1361                         u32 rkey = be32_to_cpu(reth->rkey);
1362                         u64 vaddr = be64_to_cpu(reth->vaddr);
1363                         int ok;
1364
1365                         /*
1366                          * Address range must be a subset of the original
1367                          * request and start on pmtu boundaries.
1368                          */
1369                         ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
1370                                            qp->s_rdma_len, vaddr, rkey,
1371                                            IB_ACCESS_REMOTE_READ);
1372                         if (unlikely(!ok)) {
                                     spin_unlock(&qp->s_lock);
1373                                 goto done;
                             }
1374                 } else {
1375                         qp->s_rdma_sge.sg_list = NULL;
1376                         qp->s_rdma_sge.num_sge = 0;
1377                         qp->s_rdma_sge.sge.mr = NULL;
1378                         qp->s_rdma_sge.sge.vaddr = NULL;
1379                         qp->s_rdma_sge.sge.length = 0;
1380                         qp->s_rdma_sge.sge.sge_length = 0;
1381                 }
1382                 break;
1383
1384         case OP(COMPARE_SWAP):
1385         case OP(FETCH_ADD):
1386                 /*
1387                  * Check for the PSN of the last atomic operation
1388                  * performed and resend the result if found.
1389                  */
1390                 if ((psn & IPS_PSN_MASK) != qp->r_atomic_psn) {
1391                         spin_unlock(&qp->s_lock);
1392                         goto done;
1393                 }
1394                 qp->s_ack_atomic = qp->r_atomic_data;
1395                 break;
1396         }
1397         qp->s_ack_state = opcode;
1398         qp->s_nak_state = 0;
1399         qp->s_ack_psn = psn;
1400 resched:
1401         return 0;
1402
1403 done:
1404         return 1;
1405 }
1406
1407 /**
1408  * ipath_rc_rcv - process an incoming RC packet
1409  * @dev: the device this packet came in on
1410  * @hdr: the header of this packet
1411  * @has_grh: true if the header has a GRH
1412  * @data: the packet data
1413  * @tlen: the packet length
1414  * @qp: the QP for this packet
1415  *
1416  * This is called from ipath_qp_rcv() to process an incoming RC packet
1417  * for the given QP.
1418  * Called at interrupt level.
1419  */
1420 void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
1421                   int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
1422 {
1423         struct ipath_other_headers *ohdr;
1424         u32 opcode;
1425         u32 hdrsize;
1426         u32 psn;
1427         u32 pad;
1428         unsigned long flags;
1429         struct ib_wc wc;
1430         u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
1431         int diff;
1432         struct ib_reth *reth;
1433         int header_in_data;
1434
1435         /* Check for GRH */
1436         if (!has_grh) {
1437                 ohdr = &hdr->u.oth;
1438                 hdrsize = 8 + 12;       /* LRH + BTH */
1439                 psn = be32_to_cpu(ohdr->bth[2]);
1440                 header_in_data = 0;
1441         } else {
1442                 ohdr = &hdr->u.l.oth;
1443                 hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
1444                 /*
1445                  * The header with GRH is 60 bytes and the core driver sets
1446                  * the eager header buffer size to 56 bytes so the last 4
1447                  * bytes of the BTH header (PSN) are in the data buffer.
1448                  */
1449                 header_in_data =
1450                         ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
1451                 if (header_in_data) {
1452                         psn = be32_to_cpu(((__be32 *) data)[0]);
1453                         data += sizeof(__be32);
1454                 } else
1455                         psn = be32_to_cpu(ohdr->bth[2]);
1456         }
1457         /*
1458          * The opcode is in the low byte when it's in network order
1459          * (top byte when in host order).
1460          */
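             /*
              * First BTH dword layout (host order): opcode in bits 31:24,
              * solicited-event bit 23, pad count in bits 21:20 and P_Key in
              * bits 15:0.  The pad count and SE bit are extracted the same
              * way further below.
              */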
1461         opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
1462
1463         /*
1464          * Process responses (ACKs) before anything else.  Note that the
1465          * packet sequence number will be for something in the send work
1466          * queue rather than the expected receive packet sequence number.
1467          * In other words, this QP is the requester.
1468          */
1469         if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1470             opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1471                 ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
1472                                   hdrsize, pmtu, header_in_data);
1473                 goto bail;
1474         }
1475
1476         spin_lock_irqsave(&qp->r_rq.lock, flags);
1477
1478         /* Compute 24 bits worth of difference. */
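             /*
              * diff > 0: the packet is ahead of the expected PSN (a sequence
              * error); diff < 0: it precedes r_psn and is a duplicate.  Both
              * cases are handled by ipath_rc_rcv_error().
              */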
1479         diff = ipath_cmp24(psn, qp->r_psn);
1480         if (unlikely(diff)) {
1481                 if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
1482                                        psn, diff, header_in_data))
1483                         goto done;
1484                 goto resched;
1485         }
1486
1487         /* Check for opcode sequence errors. */
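             /*
              * The previous packet's opcode (qp->r_state) constrains what may
              * legally follow, e.g. a *_MIDDLE or *_LAST packet is only valid
              * while the matching *_FIRST/*_MIDDLE sequence is in progress.
              */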
1488         switch (qp->r_state) {
1489         case OP(SEND_FIRST):
1490         case OP(SEND_MIDDLE):
1491                 if (opcode == OP(SEND_MIDDLE) ||
1492                     opcode == OP(SEND_LAST) ||
1493                     opcode == OP(SEND_LAST_WITH_IMMEDIATE))
1494                         break;
1495         nack_inv:
1496         /*
1497          * A NAK will ACK earlier sends and RDMA writes.  Don't queue the
1498          * NAK if an RDMA read, atomic, or NAK is pending though.
1499          */
1500         spin_lock(&qp->s_lock);
1501         if (qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
1502             qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
1503                 spin_unlock(&qp->s_lock);
1504                 goto done;
1505         }
1506         /* XXX Flush WQEs */
1507         qp->state = IB_QPS_ERR;
1508         qp->s_ack_state = OP(SEND_ONLY);
1509         qp->s_nak_state = IB_NAK_INVALID_REQUEST;
1510         qp->s_ack_psn = qp->r_psn;
1511         goto resched;
1512
1513         case OP(RDMA_WRITE_FIRST):
1514         case OP(RDMA_WRITE_MIDDLE):
1515                 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
1516                     opcode == OP(RDMA_WRITE_LAST) ||
1517                     opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1518                         break;
1519                 goto nack_inv;
1520
1521         case OP(RDMA_READ_REQUEST):
1522         case OP(COMPARE_SWAP):
1523         case OP(FETCH_ADD):
1524                 /*
1525                  * Drop all new requests until a response has been sent.  A
1526                  * new request then ACKs the RDMA response we sent.  Relaxed
1527                  * ordering would allow new requests to be processed but we
1528                  * would need to keep a queue of rwqe's for all that are in
1529                  * progress.  Note that we can't RNR NAK this request since
1530                  * the RDMA READ or atomic response is already queued to be
1531                  * sent (unless we implement a response send queue).
1532                  */
1533                 goto done;
1534
1535         default:
1536                 if (opcode == OP(SEND_MIDDLE) ||
1537                     opcode == OP(SEND_LAST) ||
1538                     opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
1539                     opcode == OP(RDMA_WRITE_MIDDLE) ||
1540                     opcode == OP(RDMA_WRITE_LAST) ||
1541                     opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
1542                         goto nack_inv;
1543                 break;
1544         }
1545
1546         wc.imm_data = 0;
1547         wc.wc_flags = 0;
1548
1549         /* OK, process the packet. */
1550         switch (opcode) {
1551         case OP(SEND_FIRST):
1552                 if (!ipath_get_rwqe(qp, 0)) {
1553                 rnr_nak:
1554                         /*
1555                          * A RNR NAK will ACK earlier sends and RDMA writes.
1556                          * Don't queue the NAK if an RDMA read or atomic
1557                          * is pending though.
1558                          */
1559                         spin_lock(&qp->s_lock);
1560                         if (qp->s_ack_state >=
1561                             OP(RDMA_READ_REQUEST) &&
1562                             qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
1563                                 spin_unlock(&qp->s_lock);
1564                                 goto done;
1565                         }
1566                         qp->s_ack_state = OP(SEND_ONLY);
1567                         qp->s_nak_state = IB_RNR_NAK | qp->s_min_rnr_timer;
1568                         qp->s_ack_psn = qp->r_psn;
1569                         goto resched;
1570                 }
1571                 qp->r_rcv_len = 0;
1572                 /* FALLTHROUGH */
1573         case OP(SEND_MIDDLE):
1574         case OP(RDMA_WRITE_MIDDLE):
1575         send_middle:
1576                 /* Payload must be exactly one PMTU and fit in the posted rwqe. */
1577                 if (unlikely(tlen != (hdrsize + pmtu + 4)))
1578                         goto nack_inv;
1579                 qp->r_rcv_len += pmtu;
1580                 if (unlikely(qp->r_rcv_len > qp->r_len))
1581                         goto nack_inv;
1582                 ipath_copy_sge(&qp->r_sge, data, pmtu);
1583                 break;
1584
1585         case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
1586                 /* consume RWQE */
1587                 if (!ipath_get_rwqe(qp, 1))
1588                         goto rnr_nak;
1589                 goto send_last_imm;
1590
1591         case OP(SEND_ONLY):
1592         case OP(SEND_ONLY_WITH_IMMEDIATE):
1593                 if (!ipath_get_rwqe(qp, 0))
1594                         goto rnr_nak;
1595                 qp->r_rcv_len = 0;
1596                 if (opcode == OP(SEND_ONLY))
1597                         goto send_last;
1598                 /* FALLTHROUGH */
1599         case OP(SEND_LAST_WITH_IMMEDIATE):
1600         send_last_imm:
1601                 if (header_in_data) {
1602                         wc.imm_data = *(__be32 *) data;
1603                         data += sizeof(__be32);
1604                 } else {
1605                         /* Immediate data comes after BTH */
1606                         wc.imm_data = ohdr->u.imm_data;
1607                 }
1608                 hdrsize += 4;
1609                 wc.wc_flags = IB_WC_WITH_IMM;
1610                 /* FALLTHROUGH */
1611         case OP(SEND_LAST):
1612         case OP(RDMA_WRITE_LAST):
1613         send_last:
1614                 /* Get the number of bytes the message was padded by. */
1615                 pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
1616                 /* Check for invalid length. */
1617                 /* XXX LAST len should be >= 1 */
1618                 if (unlikely(tlen < (hdrsize + pad + 4)))
1619                         goto nack_inv;
1620                 /* Don't count the CRC. */
1621                 tlen -= (hdrsize + pad + 4);
1622                 wc.byte_len = tlen + qp->r_rcv_len;
1623                 if (unlikely(wc.byte_len > qp->r_len))
1624                         goto nack_inv;
1625                 ipath_copy_sge(&qp->r_sge, data, tlen);
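                     /*
                      * A message is complete; advance the MSN that is
                      * reported back in the AETH of our ACK.
                      */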
1626                 atomic_inc(&qp->msn);
1627                 if (opcode == OP(RDMA_WRITE_LAST) ||
1628                     opcode == OP(RDMA_WRITE_ONLY))
1629                         break;
1630                 wc.wr_id = qp->r_wr_id;
1631                 wc.status = IB_WC_SUCCESS;
1632                 wc.opcode = IB_WC_RECV;
1633                 wc.vendor_err = 0;
1634                 wc.qp_num = qp->ibqp.qp_num;
1635                 wc.src_qp = qp->remote_qpn;
1636                 wc.pkey_index = 0;
1637                 wc.slid = qp->remote_ah_attr.dlid;
1638                 wc.sl = qp->remote_ah_attr.sl;
1639                 wc.dlid_path_bits = 0;
1640                 wc.port_num = 0;
1641                 /* Signal completion event if the solicited bit is set. */
1642                 ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
1643                                (ohdr->bth[0] &
1644                                 __constant_cpu_to_be32(1 << 23)) != 0);
1645                 break;
1646
1647         case OP(RDMA_WRITE_FIRST):
1648         case OP(RDMA_WRITE_ONLY):
1649         case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
1650                 /* consume RWQE */
1651                 /* RETH comes after BTH */
1652                 if (!header_in_data)
1653                         reth = &ohdr->u.rc.reth;
1654                 else {
1655                         reth = (struct ib_reth *)data;
1656                         data += sizeof(*reth);
1657                 }
1658                 hdrsize += sizeof(*reth);
1659                 qp->r_len = be32_to_cpu(reth->length);
1660                 qp->r_rcv_len = 0;
1661                 if (qp->r_len != 0) {
1662                         u32 rkey = be32_to_cpu(reth->rkey);
1663                         u64 vaddr = be64_to_cpu(reth->vaddr);
1664                         int ok;
1665
1666                         /* Check rkey & NAK */
1667                         ok = ipath_rkey_ok(dev, &qp->r_sge,
1668                                            qp->r_len, vaddr, rkey,
1669                                            IB_ACCESS_REMOTE_WRITE);
1670                         if (unlikely(!ok)) {
1671                         nack_acc:
1672                                 /*
1673                                  * A NAK will ACK earlier sends and RDMA
1674                                  * writes.  Don't queue the NAK if an RDMA
1675                                  * read, atomic, or NAK is pending though.
1676                                  */
1677                                 spin_lock(&qp->s_lock);
1678                                 if (qp->s_ack_state >=
1679                                     OP(RDMA_READ_REQUEST) &&
1680                                     qp->s_ack_state !=
1681                                     IB_OPCODE_ACKNOWLEDGE) {
1682                                         spin_unlock(&qp->s_lock);
1683                                         goto done;
1684                                 }
1685                                 /* XXX Flush WQEs */
1686                                 qp->state = IB_QPS_ERR;
1687                                 qp->s_ack_state = OP(RDMA_WRITE_ONLY);
1688                                 qp->s_nak_state =
1689                                         IB_NAK_REMOTE_ACCESS_ERROR;
1690                                 qp->s_ack_psn = qp->r_psn;
1691                                 goto resched;
1692                         }
1693                 } else {
1694                         qp->r_sge.sg_list = NULL;
1695                         qp->r_sge.sge.mr = NULL;
1696                         qp->r_sge.sge.vaddr = NULL;
1697                         qp->r_sge.sge.length = 0;
1698                         qp->r_sge.sge.sge_length = 0;
1699                 }
1700                 if (unlikely(!(qp->qp_access_flags &
1701                                IB_ACCESS_REMOTE_WRITE)))
1702                         goto nack_acc;
1703                 if (opcode == OP(RDMA_WRITE_FIRST))
1704                         goto send_middle;
1705                 else if (opcode == OP(RDMA_WRITE_ONLY))
1706                         goto send_last;
1707                 if (!ipath_get_rwqe(qp, 1))
1708                         goto rnr_nak;
1709                 goto send_last_imm;
1710
1711         case OP(RDMA_READ_REQUEST):
1712                 /* RETH comes after BTH */
1713                 if (!header_in_data)
1714                         reth = &ohdr->u.rc.reth;
1715                 else {
1716                         reth = (struct ib_reth *)data;
1717                         data += sizeof(*reth);
1718                 }
1719                 spin_lock(&qp->s_lock);
1720                 if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
1721                     qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
1722                         spin_unlock(&qp->s_lock);
1723                         goto done;
1724                 }
1725                 qp->s_rdma_len = be32_to_cpu(reth->length);
1726                 if (qp->s_rdma_len != 0) {
1727                         u32 rkey = be32_to_cpu(reth->rkey);
1728                         u64 vaddr = be64_to_cpu(reth->vaddr);
1729                         int ok;
1730
1731                         /* Check rkey & NAK */
1732                         ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
1733                                            qp->s_rdma_len, vaddr, rkey,
1734                                            IB_ACCESS_REMOTE_READ);
1735                         if (unlikely(!ok)) {
1736                                 spin_unlock(&qp->s_lock);
1737                                 goto nack_acc;
1738                         }
1739                         /*
1740                          * Update the next expected PSN.  We add 1 later
1741                          * below, so only add the remainder here.
1742                          */
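                             /*
                              * e.g. a read of 3 * pmtu spans three response
                              * packets: add 2 here and 1 at r_psn++ below.
                              */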
1743                         if (qp->s_rdma_len > pmtu)
1744                                 qp->r_psn += (qp->s_rdma_len - 1) / pmtu;
1745                 } else {
1746                         qp->s_rdma_sge.sg_list = NULL;
1747                         qp->s_rdma_sge.num_sge = 0;
1748                         qp->s_rdma_sge.sge.mr = NULL;
1749                         qp->s_rdma_sge.sge.vaddr = NULL;
1750                         qp->s_rdma_sge.sge.length = 0;
1751                         qp->s_rdma_sge.sge.sge_length = 0;
1752                 }
1753                 if (unlikely(!(qp->qp_access_flags &
1754                                IB_ACCESS_REMOTE_READ)))
1755                         goto nack_acc;
1756                 /*
1757                  * We need to increment the MSN here instead of when we
1758                  * finish sending the result since a duplicate request would
1759                  * increment it more than once.
1760                  */
1761                 atomic_inc(&qp->msn);
1762                 qp->s_ack_state = opcode;
1763                 qp->s_nak_state = 0;
1764                 qp->s_ack_psn = psn;
1765                 qp->r_psn++;
1766                 qp->r_state = opcode;
1767                 goto rdmadone;
1768
1769         case OP(COMPARE_SWAP):
1770         case OP(FETCH_ADD): {
1771                 struct ib_atomic_eth *ateth;
1772                 u64 vaddr;
1773                 u64 sdata;
1774                 u32 rkey;
1775
1776                 if (!header_in_data)
1777                         ateth = &ohdr->u.atomic_eth;
1778                 else {
1779                         ateth = (struct ib_atomic_eth *)data;
1780                         data += sizeof(*ateth);
1781                 }
1782                 vaddr = be64_to_cpu(ateth->vaddr);
1783                 if (unlikely(vaddr & (sizeof(u64) - 1)))
1784                         goto nack_inv;
1785                 rkey = be32_to_cpu(ateth->rkey);
1786                 /* Check rkey & NAK */
1787                 if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge,
1788                                             sizeof(u64), vaddr, rkey,
1789                                             IB_ACCESS_REMOTE_ATOMIC)))
1790                         goto nack_acc;
1791                 if (unlikely(!(qp->qp_access_flags &
1792                                IB_ACCESS_REMOTE_ATOMIC)))
1793                         goto nack_acc;
1794                 /* Perform atomic OP and save result. */
1795                 sdata = be64_to_cpu(ateth->swap_data);
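                     /*
                      * The device-wide pending_lock serializes this
                      * read-modify-write against other incoming atomics.
                      */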
1796                 spin_lock(&dev->pending_lock);
1797                 qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
1798                 if (opcode == OP(FETCH_ADD))
1799                         *(u64 *) qp->r_sge.sge.vaddr =
1800                                 qp->r_atomic_data + sdata;
1801                 else if (qp->r_atomic_data ==
1802                          be64_to_cpu(ateth->compare_data))
1803                         *(u64 *) qp->r_sge.sge.vaddr = sdata;
1804                 spin_unlock(&dev->pending_lock);
1805                 atomic_inc(&qp->msn);
1806                 qp->r_atomic_psn = psn & IPS_PSN_MASK;
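                     /*
                      * Atomic operations always need a response carrying the
                      * result: force the AckReq bit (bit 31 of the BTH PSN
                      * word) so an ACK is queued below.
                      */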
1807                 psn |= 1 << 31;
1808                 break;
1809         }
1810
1811         default:
1812                 /* Drop packet for unknown opcodes. */
1813                 goto done;
1814         }
1815         qp->r_psn++;
1816         qp->r_state = opcode;
1817         /* Send an ACK if requested or required. */
1818         if (psn & (1 << 31)) {
1819                 /*
1820                  * Coalesce ACKs unless there is a RDMA READ or
1821                  * ATOMIC pending.
1822                  */
1823                 spin_lock(&qp->s_lock);
1824                 if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
1825                     qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST) {
1826                         qp->s_ack_state = opcode;
1827                         qp->s_nak_state = 0;
1828                         qp->s_ack_psn = psn;
1829                         qp->s_ack_atomic = qp->r_atomic_data;
1830                         goto resched;
1831                 }
1832                 spin_unlock(&qp->s_lock);
1833         }
1834 done:
1835         spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1836         goto bail;
1837
1838 resched:
1839         /*
1840          * Try to send ACK right away but not if ipath_do_rc_send() is
1841          * active.
1842          */
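             /*
              * s_hdrwords != 0 means a header is still being built or sent.
              * RDMA read responses are left to the send tasklet (scheduled
              * below) since they carry payload; plain and atomic ACKs can go
              * out immediately.
              */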
1843         if (qp->s_hdrwords == 0 &&
1844             (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST ||
1845              qp->s_ack_state >= IB_OPCODE_COMPARE_SWAP))
1846                 send_rc_ack(qp);
1847
1848 rdmadone:
1849         spin_unlock(&qp->s_lock);
1850         spin_unlock_irqrestore(&qp->r_rq.lock, flags);
1851
1852         /* Call ipath_do_rc_send() in another thread. */
1853         tasklet_hi_schedule(&qp->s_task);
1854
1855 bail:
1856         return;
1857 }