/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ips_common.h"

/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_UC_##x
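/* e.g. OP(SEND_FIRST) expands to IB_OPCODE_UC_SEND_FIRST */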

static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
			       struct ib_wc *wc)
{
	if (++qp->s_last == qp->s_size)
		qp->s_last = 0;
	if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = IB_WC_SUCCESS;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc->vendor_err = 0;
		wc->byte_len = wqe->length;
		wc->qp_num = qp->ibqp.qp_num;
		wc->src_qp = qp->remote_qpn;
		wc->pkey_index = 0;
		wc->slid = qp->remote_ah_attr.dlid;
		wc->sl = qp->remote_ah_attr.sl;
		wc->dlid_path_bits = 0;
		wc->port_num = 0;
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 0);
	}
	/*
	 * Note: wqe is passed by value, so the caller must re-fetch the
	 * current WQE from the advanced qp->s_last after this returns.
	 */
}
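
/*
 * A sketch of how this tasklet is assumed to be wired up when the QP is
 * created (cf. ipath_qp.c); the exact call site may differ:
 *
 *	tasklet_init(&qp->s_task, ipath_do_uc_send, (unsigned long) qp);
 */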

/**
 * ipath_do_uc_send - do a send on a UC queue
 * @data: contains a pointer to the QP to send on
 *
 * Process entries in the send work queue until the queue is exhausted.
 * Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, after we drop the QP lock, two threads could send
 * packets out of order.
 * This is similar to ipath_do_rc_send() in ipath_rc.c except we don't
 * have timeouts or resends.
 */
void ipath_do_uc_send(unsigned long data)
{
	struct ipath_qp *qp = (struct ipath_qp *)data;
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_swqe *wqe;
	unsigned long flags;
	u16 lrh0;
	u32 hwords;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u32 bth2;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	u32 len;
	struct ipath_other_headers *ohdr;
	struct ib_wc wc;

	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
		goto bail;

	if (unlikely(qp->remote_ah_attr.dlid ==
		     ipath_layer_get_lid(dev->dd))) {
		/* Pass in an uninitialized ib_wc to save stack space. */
		ipath_ruc_loopback(qp, &wc);
		clear_bit(IPATH_S_BUSY, &qp->s_flags);
		goto bail;
	}

	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

again:
	/* Check for a constructed packet to be sent. */
	if (qp->s_hdrwords != 0) {
		/*
		 * If no PIO bufs are available, return.
		 * An interrupt will call ipath_ib_piobufavail()
		 * when one is available.
		 */
		if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
				     (u32 *) &qp->s_hdr, qp->s_cur_size,
				     qp->s_cur_sge)) {
			ipath_no_bufs_available(qp, dev);
			goto bail;
		}
		dev->n_unicast_xmit++;
		/* Record that we sent the packet and s_hdr is empty. */
		qp->s_hdrwords = 0;
	}

	lrh0 = IPS_LRH_BTH;
	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	/*
	 * The lock is needed to synchronize between
	 * setting qp->s_ack_state and post_send().
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK))
		goto done;

	bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);

	/* Send a request. */
	wqe = get_swqe_ptr(qp, qp->s_last);
	switch (qp->s_state) {
	default:
		/*
		 * Signal the completion of the last send (if there is
		 * one).
		 */
		if (qp->s_last != qp->s_tail) {
			complete_last_send(qp, wqe, &wc);
			/*
			 * complete_last_send() advanced qp->s_last but
			 * takes wqe by value; re-fetch so the new request
			 * below doesn't use the stale WQE.
			 */
			wqe = get_swqe_ptr(qp, qp->s_last);
		}

		/* Check if send work queue is empty. */
		if (qp->s_tail == qp->s_head)
			goto done;
		/*
		 * Start a new request.
		 */
		qp->s_psn = wqe->psn = qp->s_next_psn;
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_len = len = wqe->length;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			if (len > pmtu) {
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state =
					OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;	/* solicited event bit */
			break;

		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			if (len > pmtu) {
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the RETH */
				ohdr->u.rc.imm_data = wqe->wr.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= 1 << 23;
			}
			break;

		default:
			goto done;
		}
		if (++qp->s_tail >= qp->s_size)
			qp->s_tail = 0;
		break;

	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= 1 << 23;
		break;

	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		len = qp->s_len;
		if (len > pmtu) {
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state =
				OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
		}
		break;
	}
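	/* IB PSNs are 24 bits; mask the 32-bit counter with IPS_PSN_MASK. */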
	bth2 = qp->s_next_psn++ & IPS_PSN_MASK;
	qp->s_len -= len;
	bth0 |= qp->s_state << 24;

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Construct the header. */
	extra_bytes = (4 - len) & 3;
	nwords = (len + extra_bytes) >> 2;
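	/* E.g. len = 13: extra_bytes = (4 - 13) & 3 = 3, so nwords = 4. */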
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		/* Header size in 32-bit words. */
		hwords += 10;
		lrh0 = IPS_LRH_GRH;
		qp->s_hdr.u.l.grh.version_tclass_flow =
			cpu_to_be32((6 << 28) |
				    (qp->remote_ah_attr.grh.traffic_class
				     << 20) |
				    qp->remote_ah_attr.grh.flow_label);
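		/*
		 * PayLen counts everything after the GRH, in bytes: the
		 * remaining header words (hwords less 2 LRH words and
		 * 10 GRH words), the payload, and the ICRC.
		 */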
		qp->s_hdr.u.l.grh.paylen =
			cpu_to_be16(((hwords - 12) + nwords +
				     SIZE_OF_CRC) << 2);
		/* next_hdr is defined by C8-7 in ch. 8.4.1 */
		qp->s_hdr.u.l.grh.next_hdr = 0x1B;
		qp->s_hdr.u.l.grh.hop_limit =
			qp->remote_ah_attr.grh.hop_limit;
		/* The SGID is 32-bit aligned. */
		qp->s_hdr.u.l.grh.sgid.global.subnet_prefix =
			dev->gid_prefix;
		qp->s_hdr.u.l.grh.sgid.global.interface_id =
			ipath_layer_get_guid(dev->dd);
		qp->s_hdr.u.l.grh.dgid = qp->remote_ah_attr.grh.dgid;
	}
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = &qp->s_sge;
	qp->s_cur_size = len;
	lrh0 |= qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	/* DEST LID */
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(hwords + nwords + SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
	bth0 |= extra_bytes << 20;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);

	/* Check for more work to do. */
	goto again;

done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	clear_bit(IPATH_S_BUSY, &qp->s_flags);

bail:
	return;
}

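/*
 * A sketch of the assumed dispatch path: ipath_qp_rcv() demultiplexes on
 * the QP type and, for a UC QP, is expected to call:
 *
 *	ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
 */
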
/**
 * ipath_uc_rcv - handle an incoming UC packet
 * @dev: the device the packet came in on
 * @hdr: the header of the packet
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the length of the packet
 * @qp: the QP for this packet
 *
 * This is called from ipath_qp_rcv() to process an incoming UC packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	int opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	unsigned long flags;
	struct ib_wc wc;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	struct ib_reth *reth;
	int header_in_data;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
		psn = be32_to_cpu(ohdr->bth[2]);
		header_in_data = 0;
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
		/*
		 * The header with GRH is 60 bytes and the
		 * core driver sets the eager header buffer
		 * size to 56 bytes so the last 4 bytes of
		 * the BTH header (PSN) are in the data buffer.
		 */
		header_in_data =
			ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
		if (header_in_data) {
			psn = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		} else
			psn = be32_to_cpu(ohdr->bth[2]);
	}
	/*
	 * The opcode is in the low byte when it's in network order
	 * (top byte when in host order).
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;

	wc.imm_data = 0;
	wc.wc_flags = 0;

	spin_lock_irqsave(&qp->r_rq.lock, flags);

	/* Compare the PSN versus the expected PSN. */
	if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
		/*
		 * Handle a sequence error.
		 * Silently drop any current message.
		 */
		qp->r_psn = psn;
	inv:
		qp->r_state = OP(SEND_LAST);
		switch (opcode) {
		case OP(SEND_FIRST):
		case OP(SEND_ONLY):
		case OP(SEND_ONLY_WITH_IMMEDIATE):
			goto send_first;

		case OP(RDMA_WRITE_FIRST):
		case OP(RDMA_WRITE_ONLY):
		case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
			goto rdma_first;

		default:
			dev->n_pkt_drops++;
			goto done;
		}
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto inv;

	default:
		if (opcode == OP(SEND_FIRST) ||
		    opcode == OP(SEND_ONLY) ||
		    opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_FIRST) ||
		    opcode == OP(RDMA_WRITE_ONLY) ||
		    opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			break;
		goto inv;
	}

	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
	send_first:
		if (qp->r_reuse_sge) {
			qp->r_reuse_sge = 0;
			qp->r_sge = qp->s_rdma_sge;
		} else if (!ipath_get_rwqe(qp, 0)) {
			dev->n_pkt_drops++;
			goto done;
		}
		/* Save the WQE so we can reuse it in case of an error. */
		qp->s_rdma_sge = qp->r_sge;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto send_last;
		else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
			goto send_last_imm;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		/* Check for invalid length: not a full PMTU, or rwqe overflow. */
		if (unlikely(tlen != (hdrsize + pmtu + 4))) {
			qp->r_reuse_sge = 1;
			dev->n_pkt_drops++;
			goto done;
		}
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len)) {
			qp->r_reuse_sge = 1;
			dev->n_pkt_drops++;
			goto done;
		}
		ipath_copy_sge(&qp->r_sge, data, pmtu);
		break;

	case OP(SEND_LAST_WITH_IMMEDIATE):
	send_last_imm:
		if (header_in_data) {
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else {
			/* Immediate data comes after BTH */
			wc.imm_data = ohdr->u.imm_data;
		}
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case OP(SEND_LAST):
	send_last:
		/* Get the number of bytes the message was padded by. */
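		/* PadCnt occupies bits 21:20 of the first BTH word. */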
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4))) {
			qp->r_reuse_sge = 1;
			dev->n_pkt_drops++;
			goto done;
		}
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len)) {
			qp->r_reuse_sge = 1;
			dev->n_pkt_drops++;
			goto done;
		}
		/* XXX Need to free SGEs */
	last_imm:
		ipath_copy_sge(&qp->r_sge, data, tlen);
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = IB_WC_RECV;
		wc.vendor_err = 0;
		wc.qp_num = qp->ibqp.qp_num;
		wc.src_qp = qp->remote_qpn;
		wc.pkey_index = 0;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			       (ohdr->bth[0] &
				__constant_cpu_to_be32(1 << 23)) != 0);
		break;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
	rdma_first:
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);

			/* Check rkey */
			if (unlikely(!ipath_rkey_ok(
					     dev, &qp->r_sge, qp->r_len,
					     vaddr, rkey,
					     IB_ACCESS_REMOTE_WRITE))) {
				dev->n_pkt_drops++;
				goto done;
			}
		} else {
			qp->r_sge.sg_list = NULL;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE))) {
			dev->n_pkt_drops++;
			goto done;
		}
		if (opcode == OP(RDMA_WRITE_ONLY))
			goto rdma_last;
		else if (opcode ==
			 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
			goto rdma_last_imm;
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		/* Check for invalid length: not a full PMTU, or rwqe overflow. */
		if (unlikely(tlen != (hdrsize + pmtu + 4))) {
			dev->n_pkt_drops++;
			goto done;
		}
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len)) {
			dev->n_pkt_drops++;
			goto done;
		}
		ipath_copy_sge(&qp->r_sge, data, pmtu);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
	rdma_last_imm:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4))) {
			dev->n_pkt_drops++;
			goto done;
		}
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
			dev->n_pkt_drops++;
			goto done;
		}
		if (qp->r_reuse_sge) {
			qp->r_reuse_sge = 0;
		} else if (!ipath_get_rwqe(qp, 1)) {
			dev->n_pkt_drops++;
			goto done;
		}
		if (header_in_data) {
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else {
			/* Immediate data comes after BTH */
			wc.imm_data = ohdr->u.imm_data;
		}
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.byte_len = 0;
		goto last_imm;

	case OP(RDMA_WRITE_LAST):
	rdma_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4))) {
			dev->n_pkt_drops++;
			goto done;
		}
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
			dev->n_pkt_drops++;
			goto done;
		}
		ipath_copy_sge(&qp->r_sge, data, tlen);
		break;

	default:
		/* Drop packet for unknown opcodes. */
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
		dev->n_pkt_drops++;
		goto bail;
	}
	qp->r_psn++;
	qp->r_state = opcode;
done:
	spin_unlock_irqrestore(&qp->r_rq.lock, flags);

bail:
	return;
}