[PATCH] IB/ipath: fix an indenting problem
author Bryan O'Sullivan <bos@pathscale.com>
Sat, 1 Jul 2006 11:35:51 +0000 (04:35 -0700)
committer Linus Torvalds <torvalds@g5.osdl.org>
Sat, 1 Jul 2006 16:55:58 +0000 (09:55 -0700)
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
index 0ca89af..720cb3a 100644
--- a/drivers/infiniband/hw/ipath/ipath_rc.c
+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
@@ -1053,32 +1053,32 @@ static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
                        goto ack_done;
                }
        rdma_read:
-       if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
-               goto ack_done;
-       if (unlikely(tlen != (hdrsize + pmtu + 4)))
-               goto ack_done;
-       if (unlikely(pmtu >= qp->s_len))
-               goto ack_done;
-       /* We got a response so update the timeout. */
-       if (unlikely(qp->s_last == qp->s_tail ||
-                    get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
-                    IB_WR_RDMA_READ))
-               goto ack_done;
-       spin_lock(&dev->pending_lock);
-       if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
-               list_move_tail(&qp->timerwait,
-                              &dev->pending[dev->pending_index]);
-       spin_unlock(&dev->pending_lock);
-       /*
-        * Update the RDMA receive state but do the copy w/o holding the
-        * locks and blocking interrupts.  XXX Yet another place that
-        * affects relaxed RDMA order since we don't want s_sge modified.
-        */
-       qp->s_len -= pmtu;
-       qp->s_last_psn = psn;
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-       ipath_copy_sge(&qp->s_sge, data, pmtu);
-       goto bail;
+               if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
+                       goto ack_done;
+               if (unlikely(tlen != (hdrsize + pmtu + 4)))
+                       goto ack_done;
+               if (unlikely(pmtu >= qp->s_len))
+                       goto ack_done;
+               /* We got a response so update the timeout. */
+               if (unlikely(qp->s_last == qp->s_tail ||
+                            get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
+                            IB_WR_RDMA_READ))
+                       goto ack_done;
+               spin_lock(&dev->pending_lock);
+               if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
+                       list_move_tail(&qp->timerwait,
+                                      &dev->pending[dev->pending_index]);
+               spin_unlock(&dev->pending_lock);
+               /*
+                * Update the RDMA receive state but do the copy w/o holding the
+                * locks and blocking interrupts.  XXX Yet another place that
+                * affects relaxed RDMA order since we don't want s_sge modified.
+                */
+               qp->s_len -= pmtu;
+               qp->s_last_psn = psn;
+               spin_unlock_irqrestore(&qp->s_lock, flags);
+               ipath_copy_sge(&qp->s_sge, data, pmtu);
+               goto bail;
 
        case OP(RDMA_READ_RESPONSE_LAST):
                /* ACKs READ req. */
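
The hunk above is whitespace-only: inside ipath_rc_rcv_resp() the rdma_read: label sits within a switch on the packet opcode, so the statements after it belong one tab deeper than the label, at the same depth as the surrounding case bodies. Below is a minimal, self-contained sketch of that structure; the function and helper names (example_rcv_resp, some_condition, another_condition, do_copy) are invented for illustration and are not part of the driver.

/* indent_sketch.c - illustrative only, not the driver code */
#include <stdio.h>

static int some_condition(void)    { return 0; }  /* hypothetical stubs */
static int another_condition(void) { return 0; }
static void do_copy(void)          { puts("copying payload"); }

static void example_rcv_resp(int opcode)
{
	switch (opcode) {
	case 1:
		if (some_condition())
			goto ack_done;
		/* continues into the shared RDMA-read path below */
	rdma_read:
		/*
		 * The label is outdented to the case level, but the
		 * statements after it stay at case-body depth; that is
		 * the indentation the patch restores.
		 */
		if (another_condition())
			goto ack_done;
		do_copy();
		break;
	case 2:
		/* another opcode reuses the same path via goto */
		goto rdma_read;
	}
ack_done:
	return;
}

int main(void)
{
	example_rcv_resp(1);
	example_rcv_resp(2);
	return 0;
}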