Merge branch 'for_paulus' of master.kernel.org:/pub/scm/linux/kernel/git/galak/powerpc
[pandora-kernel.git] drivers/infiniband/hw/mthca/mthca_qp.c
index f673c46..f37b0e3 100644
@@ -248,6 +248,9 @@ void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
                return;
        }
 
+       if (event_type == IB_EVENT_PATH_MIG)
+               qp->port = qp->alt_port;
+
        event.device      = &dev->ib_dev;
        event.event       = event_type;
        event.element.qp  = &qp->ibqp;
@@ -392,10 +395,16 @@ static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
 {
        memset(ib_ah_attr, 0, sizeof *path);
        ib_ah_attr->port_num      = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;
+
+       if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
+               return;
+
        ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
        ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
        ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
-       ib_ah_attr->static_rate   = path->static_rate & 0x7;
+       ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
+                                                    path->static_rate & 0x7,
+                                                    ib_ah_attr->port_num);
        ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
        if (ib_ah_attr->ah_flags) {
                ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
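
For reference, a minimal userspace sketch (not driver code) of how the big-endian port_pkey word above is decoded: the port number sits in bits 31:24 (masked with 0x3) and the P_Key index in the low 7 bits, so a context whose address path was never programmed decodes to port 0 and is skipped by the new range check. ntohl() stands in for be32_to_cpu(); the values are made up.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* ntohl()/htonl() stand in for be32_to_cpu()/cpu_to_be32() */

int main(void)
{
	uint32_t port_pkey = htonl((2u << 24) | 0x15);	/* port 2, P_Key index 0x15 */
	unsigned num_ports = 2;

	unsigned port       = (ntohl(port_pkey) >> 24) & 0x3;
	unsigned pkey_index = ntohl(port_pkey) & 0x7f;

	if (port == 0 || port > num_ports)
		printf("path not set, skipping decode\n");
	else
		printf("port %u, P_Key index %u\n", port, pkey_index);
	return 0;
}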
@@ -455,8 +464,10 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
        qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
        qp_attr->cap.max_inline_data = qp->max_inline_data;
 
-       to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
-       to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+       if (qp->transport == RC || qp->transport == UC) {
+               to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
+               to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
+       }
 
        qp_attr->pkey_index     = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
        qp_attr->alt_pkey_index = be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
@@ -483,13 +494,20 @@ out:
        return err;
 }
 
-static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
+static int mthca_path_set(struct mthca_dev *dev, struct ib_ah_attr *ah,
+                         struct mthca_qp_path *path, u8 port)
 {
        path->g_mylmc     = ah->src_path_bits & 0x7f;
        path->rlid        = cpu_to_be16(ah->dlid);
-       path->static_rate = !!ah->static_rate;
+       path->static_rate = mthca_get_rate(dev, ah->static_rate, port);
 
        if (ah->ah_flags & IB_AH_GRH) {
+               if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
+                       mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
+                                 ah->grh.sgid_index, dev->limits.gid_table_len-1);
+                       return -1;
+               }
+
                path->g_mylmc   |= 1 << 7;
                path->mgid_index = ah->grh.sgid_index;
                path->hop_limit  = ah->grh.hop_limit;
@@ -500,6 +518,8 @@ static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
                memcpy(path->rgid, ah->grh.dgid.raw, 16);
        } else
                path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
+
+       return 0;
 }
 
 int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
@@ -592,8 +612,14 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
        if (qp->transport == MLX || qp->transport == UD)
                qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
-       else if (attr_mask & IB_QP_PATH_MTU)
+       else if (attr_mask & IB_QP_PATH_MTU) {
+               if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
+                       mthca_dbg(dev, "path MTU (%u) is invalid\n",
+                                 attr->path_mtu);
+                       return -EINVAL;
+               }
                qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
+       }
 
        if (mthca_is_memfree(dev)) {
                if (qp->rq.max)
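
A standalone sketch of the mtu_msgmax encoding used above, assuming (as the shifts and the 2048-byte UD limit suggest) that the IB MTU enum occupies bits 7:5 and log2 of the maximum message size the low five bits, so RC/UC QPs advertise 2^31-byte messages while MLX/UD QPs cap at 2^11 = 2048 bytes. The constants mirror the kernel's enum ib_mtu values.

#include <stdio.h>

enum { MTU_256 = 1, MTU_512, MTU_1024, MTU_2048, MTU_4096 };	/* as in enum ib_mtu */

static unsigned char mtu_msgmax(int mtu, int log_max_msg)
{
	return (mtu << 5) | log_max_msg;	/* MTU in bits 7:5, log2 max msg in bits 4:0 */
}

int main(void)
{
	printf("RC/UC, 2048 MTU: 0x%02x\n", mtu_msgmax(MTU_2048, 31));	/* 0x9f */
	printf("MLX/UD:          0x%02x\n", mtu_msgmax(MTU_2048, 11));	/* 0x8b */
	return 0;
}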
@@ -619,7 +645,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
        if (qp->transport == MLX)
                qp_context->pri_path.port_pkey |=
-                       cpu_to_be32(to_msqp(qp)->port << 24);
+                       cpu_to_be32(qp->port << 24);
        else {
                if (attr_mask & IB_QP_PORT) {
                        qp_context->pri_path.port_pkey |=
@@ -642,7 +668,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
        }
 
        if (attr_mask & IB_QP_AV) {
-               mthca_path_set(&attr->ah_attr, &qp_context->pri_path);
+               if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
+                                  attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
+                       return -EINVAL;
+
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
        }
 
@@ -664,7 +693,10 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                        return -EINVAL;
                }
 
-               mthca_path_set(&attr->alt_ah_attr, &qp_context->alt_path);
+               if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
+                                  attr->alt_ah_attr.port_num))
+                       return -EINVAL;
+
                qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
                                                              attr->alt_port_num << 24);
                qp_context->alt_path.ackto = attr->alt_timeout << 3;
@@ -758,21 +790,24 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
        err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
                              mailbox, sqd_event, &status);
+       if (err)
+               goto out;
        if (status) {
                mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
                           cur_state, new_state, status);
                err = -EINVAL;
+               goto out;
        }
 
-       if (!err) {
-               qp->state = new_state;
-               if (attr_mask & IB_QP_ACCESS_FLAGS)
-                       qp->atomic_rd_en = attr->qp_access_flags;
-               if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
-                       qp->resp_depth = attr->max_dest_rd_atomic;
-       }
-
-       mthca_free_mailbox(dev, mailbox);
+       qp->state = new_state;
+       if (attr_mask & IB_QP_ACCESS_FLAGS)
+               qp->atomic_rd_en = attr->qp_access_flags;
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+               qp->resp_depth = attr->max_dest_rd_atomic;
+       if (attr_mask & IB_QP_PORT)
+               qp->port = attr->port_num;
+       if (attr_mask & IB_QP_ALT_PATH)
+               qp->alt_port = attr->alt_port_num;
 
        if (is_sqp(dev, qp))
                store_attrs(to_msqp(qp), attr, attr_mask);
@@ -784,20 +819,20 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
        if (is_qp0(dev, qp)) {
                if (cur_state != IB_QPS_RTR &&
                    new_state == IB_QPS_RTR)
-                       init_port(dev, to_msqp(qp)->port);
+                       init_port(dev, qp->port);
 
                if (cur_state != IB_QPS_RESET &&
                    cur_state != IB_QPS_ERR &&
                    (new_state == IB_QPS_RESET ||
                     new_state == IB_QPS_ERR))
-                       mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
+                       mthca_CLOSE_IB(dev, qp->port, &status);
        }
 
        /*
         * If we moved a kernel QP to RESET, clean up all old CQ
         * entries and reinitialize the QP.
         */
-       if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
+       if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
                               qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
@@ -816,6 +851,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                }
        }
 
+out:
+       mthca_free_mailbox(dev, mailbox);
        return err;
 }
 
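A minimal standalone illustration of the single-exit idiom the tail of mthca_modify_qp() is converted to above: once the firmware command has been issued, failures jump to one label, the mailbox (here just a heap buffer) is freed exactly once, and the software QP state is updated only when the command succeeded. fake_qp, modify() and cmd_status are illustrative names, not driver symbols.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_qp { int state; };

static int modify(struct fake_qp *qp, int new_state, int cmd_status)
{
	char *mailbox;
	int err = 0;

	mailbox = malloc(256);		/* stands in for mailbox allocation */
	if (!mailbox)
		return -ENOMEM;

	if (cmd_status) {		/* firmware reported failure */
		err = -EINVAL;
		goto out;
	}

	qp->state = new_state;		/* software state changes only on success */

out:
	free(mailbox);			/* single cleanup point for every path */
	return err;
}

int main(void)
{
	struct fake_qp qp = { 0 };

	printf("ok:   %d, state %d\n", modify(&qp, 3, 0), qp.state);
	printf("fail: %d, state %d\n", modify(&qp, 4, 1), qp.state);
	return 0;
}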
@@ -1177,10 +1214,6 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 {
        int err;
 
-       err = mthca_set_qp_size(dev, cap, pd, qp);
-       if (err)
-               return err;
-
        switch (type) {
        case IB_QPT_RC: qp->transport = RC; break;
        case IB_QPT_UC: qp->transport = UC; break;
@@ -1188,10 +1221,17 @@ int mthca_alloc_qp(struct mthca_dev *dev,
        default: return -EINVAL;
        }
 
+       err = mthca_set_qp_size(dev, cap, pd, qp);
+       if (err)
+               return err;
+
        qp->qpn = mthca_alloc(&dev->qp_table.alloc);
        if (qp->qpn == -1)
                return -ENOMEM;
 
+       /* initialize port to zero for error-catching. */
+       qp->port = 0;
+
        err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
                                    send_policy, qp);
        if (err) {
@@ -1220,6 +1260,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
        u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
        int err;
 
+       sqp->qp.transport = MLX;
        err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
        if (err)
                return err;
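
A rough sketch of why the two reorderings above (setting qp->transport before calling mthca_set_qp_size() in both mthca_alloc_qp() and mthca_alloc_sqp()) matter: the work queue sizing depends on the transport, since MLX special QPs need room for extra non-data segments in each send WQE. The constants below are illustrative, not the driver's real segment sizes.

#include <stdio.h>

enum transport { RC, UC, UD, MLX };

static int max_data_size(enum transport t, int desc_sz)
{
	int size = desc_sz - 16;	/* room for the "next" segment (illustrative) */

	if (t == MLX)
		size -= 2 * 16;		/* extra non-data segments for special QPs */
	return size;
}

int main(void)
{
	printf("RC:  %d bytes of s/g space per 256-byte WQE\n", max_data_size(RC, 256));
	printf("MLX: %d bytes of s/g space per 256-byte WQE\n", max_data_size(MLX, 256));
	return 0;
}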
@@ -1240,7 +1281,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
        if (err)
                goto err_out;
 
-       sqp->port = port;
+       sqp->qp.port      = port;
        sqp->qp.qpn       = mqpn;
        sqp->qp.transport = MLX;
 
@@ -1383,10 +1424,10 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
                sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
        sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
        if (!sqp->qp.ibqp.qp_num)
-               ib_get_cached_pkey(&dev->ib_dev, sqp->port,
+               ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
                                   sqp->pkey_index, &pkey);
        else
-               ib_get_cached_pkey(&dev->ib_dev, sqp->port,
+               ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
                                   wr->wr.ud.pkey_index, &pkey);
        sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
        sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
@@ -1980,8 +2021,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                wmb();
                ((struct mthca_next_seg *) prev_wqe)->ee_nds =
                        cpu_to_be32(MTHCA_NEXT_DBD | size |
-                                    ((wr->send_flags & IB_SEND_FENCE) ?
-                                    MTHCA_NEXT_FENCE : 0));
+                                   ((wr->send_flags & IB_SEND_FENCE) ?
+                                    MTHCA_NEXT_FENCE : 0));
 
                if (!size0) {
                        size0 = size;
@@ -2183,7 +2224,7 @@ int __devinit mthca_init_qp_table(struct mthca_dev *dev)
        return err;
 }
 
-void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
+void mthca_cleanup_qp_table(struct mthca_dev *dev)
 {
        int i;
        u8 status;