Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/dtor/input
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index e5dc453..842cd0b 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -88,7 +88,6 @@ struct cm_port {
 struct cm_device {
        struct list_head list;
        struct ib_device *device;
-       __be64 ca_guid;
        struct cm_port port[0];
 };
 
@@ -101,7 +100,7 @@ struct cm_av {
 };
 
 struct cm_work {
-       struct work_struct work;
+       struct delayed_work work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
@@ -161,7 +160,7 @@ struct cm_id_private {
        atomic_t work_count;
 };
 
-static void cm_work_handler(void *data);
+static void cm_work_handler(struct work_struct *work);
 
 static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
 {
@@ -668,8 +667,7 @@ static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
                return ERR_PTR(-ENOMEM);
 
        timewait_info->work.local_id = local_id;
-       INIT_WORK(&timewait_info->work.work, cm_work_handler,
-                 &timewait_info->work);
+       INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
 }
@@ -740,8 +738,8 @@ retest:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
-                              &cm_id_priv->av.port->cm_dev->ca_guid,
-                              sizeof cm_id_priv->av.port->cm_dev->ca_guid,
+                              &cm_id_priv->id.device->node_guid,
+                              sizeof cm_id_priv->id.device->node_guid,
                               NULL, 0);
                break;
        case IB_CM_REQ_RCVD:
@@ -884,7 +882,7 @@ static void cm_format_req(struct cm_req_msg *req_msg,
 
        req_msg->local_comm_id = cm_id_priv->id.local_id;
        req_msg->service_id = param->service_id;
-       req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
+       req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
        cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
        cm_req_set_resp_res(req_msg, param->responder_resources);
        cm_req_set_init_depth(req_msg, param->initiator_depth);
@@ -1443,7 +1441,7 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
        cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
        cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
        cm_rep_set_srq(rep_msg, param->srq);
-       rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
+       rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
 
        if (param->private_data && param->private_data_len)
                memcpy(rep_msg->private_data, param->private_data,
@@ -2995,9 +2993,9 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
        }
 }
 
-static void cm_work_handler(void *data)
+static void cm_work_handler(struct work_struct *_work)
 {
-       struct cm_work *work = data;
+       struct cm_work *work = container_of(_work, struct cm_work, work.work);
        int ret;
 
        switch (work->cm_event.event) {
@@ -3087,12 +3085,12 @@ static int cm_establish(struct ib_cm_id *cm_id)
         * we need to find the cm_id once we're in the context of the
         * worker thread, rather than holding a reference on it.
         */
-       INIT_WORK(&work->work, cm_work_handler, work);
+       INIT_DELAYED_WORK(&work->work, cm_work_handler);
        work->local_id = cm_id->local_id;
        work->remote_id = cm_id->remote_id;
        work->mad_recv_wc = NULL;
        work->cm_event.event = IB_CM_USER_ESTABLISHED;
-       queue_work(cm.wq, &work->work);
+       queue_delayed_work(cm.wq, &work->work, 0);
 out:
        return ret;
 }
@@ -3191,11 +3189,11 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
                return;
        }
 
-       INIT_WORK(&work->work, cm_work_handler, work);
+       INIT_DELAYED_WORK(&work->work, cm_work_handler);
        work->cm_event.event = event;
        work->mad_recv_wc = mad_recv_wc;
        work->port = (struct cm_port *)mad_agent->context;
-       queue_work(cm.wq, &work->work);
+       queue_delayed_work(cm.wq, &work->work, 0);
 }
 
 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
@@ -3290,6 +3288,10 @@ static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
 
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
+       /* Allow transition to RTS before sending REP */
+       case IB_CM_REQ_RCVD:
+       case IB_CM_MRA_REQ_SENT:
+
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_REP_SENT:
@@ -3382,7 +3384,6 @@ static void cm_add_one(struct ib_device *device)
                return;
 
        cm_dev->device = device;
-       cm_dev->ca_guid = device->node_guid;
 
        set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
        for (i = 1; i <= device->phys_port_cnt; i++) {