Merge ../torvalds-2.6/
[pandora-kernel.git] / drivers / infiniband / core / cm.c
1 /*
2  * Copyright (c) 2004, 2005 Intel Corporation.  All rights reserved.
3  * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
4  * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
5  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  *
35  * $Id: cm.c 2821 2005-07-08 17:07:28Z sean.hefty $
36  */
37 #include <linux/dma-mapping.h>
38 #include <linux/err.h>
39 #include <linux/idr.h>
40 #include <linux/interrupt.h>
41 #include <linux/pci.h>
42 #include <linux/rbtree.h>
43 #include <linux/spinlock.h>
44 #include <linux/workqueue.h>
45
46 #include <rdma/ib_cache.h>
47 #include <rdma/ib_cm.h>
48 #include "cm_msgs.h"
49
50 MODULE_AUTHOR("Sean Hefty");
51 MODULE_DESCRIPTION("InfiniBand CM");
52 MODULE_LICENSE("Dual BSD/GPL");
53
static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

/*
 * Registration with the IB midlayer: cm_add_one()/cm_remove_one() are
 * called for each IB device as it is added to or removed from the system.
 */
static struct ib_client cm_client = {
        .name   = "cm",
        .add    = cm_add_one,
        .remove = cm_remove_one
};
62
/* Global CM state shared by all devices and connection identifiers. */
static struct ib_cm {
        spinlock_t lock;                /* guards the id/rbtree tables below */
        struct list_head device_list;   /* all registered cm_device entries */
        rwlock_t device_lock;           /* guards device_list */
        struct rb_root listen_service_table;
        u64 listen_service_id;          /* next auto-assigned service ID */
        /* struct rb_root peer_service_table; todo: fix peer to peer */
        struct rb_root remote_qp_table;
        struct rb_root remote_id_table;
        struct rb_root remote_sidr_table;
        struct idr local_id_table;      /* local comm ID -> cm_id_private */
        struct workqueue_struct *wq;    /* deferred CM work processing */
} cm;
76
/* Per-port CM context: the MAD agent used to exchange CM MADs. */
struct cm_port {
        struct cm_device *cm_dev;
        struct ib_mad_agent *mad_agent;
        u8 port_num;
};

/*
 * Per-device CM context, linked on cm.device_list.  The ports follow
 * the struct as a variable-length trailing array (one cm_port per
 * physical port).
 */
struct cm_device {
        struct list_head list;
        struct ib_device *device;
        __be64 ca_guid;
        struct cm_port port[0];
};

/* Resolved address vector used when addressing CM MADs out a port. */
struct cm_av {
        struct cm_port *port;
        union ib_gid dgid;
        struct ib_ah_attr ah_attr;
        u16 pkey_index;
        u8 packet_life_time;
};
97
/*
 * A deferred unit of CM processing queued on cm.wq.  path[] is a
 * trailing array holding any path records carried by a received MAD.
 */
struct cm_work {
        struct work_struct work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
        __be32 local_id;                        /* Established / timewait */
        __be32 remote_id;
        struct ib_cm_event cm_event;
        struct ib_sa_path_rec path[0];
};

/*
 * Tracks a connection through the timewait state.  The embedded work
 * item is queued (delayed) to report timewait exit.
 */
struct cm_timewait_info {
        struct cm_work work;                    /* Must be first. */
        struct rb_node remote_qp_node;          /* in cm.remote_qp_table */
        struct rb_node remote_id_node;          /* in cm.remote_id_table */
        __be64 remote_ca_guid;
        __be32 remote_qpn;
        u8 inserted_remote_qp;                  /* node linked into table? */
        u8 inserted_remote_id;                  /* node linked into table? */
};
118
/*
 * Private state behind each ib_cm_id handed to users.  Destruction
 * waits on 'wait' until 'refcount' drops to zero (see cm_deref_id()
 * and ib_destroy_cm_id()).
 */
struct cm_id_private {
        struct ib_cm_id id;

        struct rb_node service_node;    /* in cm.listen_service_table */
        struct rb_node sidr_id_node;    /* in cm.remote_sidr_table */
        spinlock_t lock;                /* guards this id's state */
        wait_queue_head_t wait;         /* destroy waits for refcount == 0 */
        atomic_t refcount;

        struct ib_mad_send_buf *msg;    /* outstanding request, if any */
        struct cm_timewait_info *timewait_info;
        /* todo: use alternate port on send failure */
        struct cm_av av;
        struct cm_av alt_av;

        void *private_data;             /* owned copy, freed on destroy */
        __be64 tid;
        __be32 local_qpn;
        __be32 remote_qpn;
        __be32 sq_psn;
        __be32 rq_psn;
        int timeout_ms;
        enum ib_mtu path_mtu;
        u8 private_data_len;
        u8 max_cm_retries;
        u8 peer_to_peer;
        u8 responder_resources;
        u8 initiator_depth;
        u8 local_ack_timeout;
        u8 retry_count;
        u8 rnr_retry_count;
        u8 service_timeout;

        struct list_head work_list;     /* queued cm_work items */
        atomic_t work_count;
};
155
156 static void cm_work_handler(void *data);
157
/*
 * Drop a reference on a cm_id_private; once the count reaches zero,
 * wake any thread sleeping in ib_destroy_cm_id() on cm_id_priv->wait.
 */
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
        if (atomic_dec_and_test(&cm_id_priv->refcount))
                wake_up(&cm_id_priv->wait);
}
163
/*
 * Allocate a MAD send buffer addressed through cm_id_priv's primary av.
 * On success the buffer holds a reference on cm_id_priv (recorded in
 * context[0]) which cm_free_msg() releases, and *msg is set.  Returns
 * 0 or a negative errno from AH/MAD allocation.
 */
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
                        struct ib_mad_send_buf **msg)
{
        struct ib_mad_agent *mad_agent;
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        mad_agent = cm_id_priv->av.port->mad_agent;
        ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
                               cm_id_priv->av.pkey_index,
                               ah, 0, sizeof(struct ib_mad_hdr),
                               sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr),
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                /* release the AH we created above before bailing out */
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }

        /* Timeout set by caller if response is expected. */
        m->send_wr.wr.ud.retries = cm_id_priv->max_cm_retries;

        atomic_inc(&cm_id_priv->refcount);
        m->context[0] = cm_id_priv;
        *msg = m;
        return 0;
}
194
/*
 * Allocate a MAD send buffer for replying to a received MAD.  The AH is
 * derived from the incoming work completion/GRH, so no cm_id state is
 * required (and no reference is taken: context[0] stays unset).
 * Returns 0 or a negative errno.
 */
static int cm_alloc_response_msg(struct cm_port *port,
                                 struct ib_mad_recv_wc *mad_recv_wc,
                                 struct ib_mad_send_buf **msg)
{
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
                                  mad_recv_wc->recv_buf.grh, port->port_num);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        /* remote QPN 1: the well-known CM QP */
        m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
                               ah, 0, sizeof(struct ib_mad_hdr),
                               sizeof(struct ib_mad)-sizeof(struct ib_mad_hdr),
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }
        *msg = m;
        return 0;
}
218
219 static void cm_free_msg(struct ib_mad_send_buf *msg)
220 {
221         ib_destroy_ah(msg->send_wr.wr.ud.ah);
222         if (msg->context[0])
223                 cm_deref_id(msg->context[0]);
224         ib_free_send_mad(msg);
225 }
226
227 static void * cm_copy_private_data(const void *private_data,
228                                    u8 private_data_len)
229 {
230         void *data;
231
232         if (!private_data || !private_data_len)
233                 return NULL;
234
235         data = kmalloc(private_data_len, GFP_KERNEL);
236         if (!data)
237                 return ERR_PTR(-ENOMEM);
238
239         memcpy(data, private_data, private_data_len);
240         return data;
241 }
242
243 static void cm_set_private_data(struct cm_id_private *cm_id_priv,
244                                  void *private_data, u8 private_data_len)
245 {
246         if (cm_id_priv->private_data && cm_id_priv->private_data_len)
247                 kfree(cm_id_priv->private_data);
248
249         cm_id_priv->private_data = private_data;
250         cm_id_priv->private_data_len = private_data_len;
251 }
252
253 static void cm_set_ah_attr(struct ib_ah_attr *ah_attr, u8 port_num,
254                            u16 dlid, u8 sl, u16 src_path_bits)
255 {
256         memset(ah_attr, 0, sizeof ah_attr);
257         ah_attr->dlid = dlid;
258         ah_attr->sl = sl;
259         ah_attr->src_path_bits = src_path_bits;
260         ah_attr->port_num = port_num;
261 }
262
/*
 * Build an av for replying to a received MAD, taking the pkey index,
 * source LID, SL, and destination path bits from the work completion.
 */
static void cm_init_av_for_response(struct cm_port *port,
                                    struct ib_wc *wc, struct cm_av *av)
{
        av->port = port;
        av->pkey_index = wc->pkey_index;
        cm_set_ah_attr(&av->ah_attr, port->port_num, wc->slid,
                       wc->sl, wc->dlid_path_bits);
}
271
/*
 * Resolve a path record to a local port and fill in *av.  Scans all
 * registered devices (under cm.device_lock) for one whose cached GID
 * table contains the path's SGID.  Returns -EINVAL if no local port
 * owns that GID, the ib_find_cached_pkey() error if the path's pkey is
 * not present on the port, or 0 on success.
 */
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
        struct cm_device *cm_dev;
        struct cm_port *port = NULL;
        unsigned long flags;
        int ret;
        u8 p;

        read_lock_irqsave(&cm.device_lock, flags);
        list_for_each_entry(cm_dev, &cm.device_list, list) {
                if (!ib_find_cached_gid(cm_dev->device, &path->sgid,
                                        &p, NULL)) {
                        /* ib_find_cached_gid ports are 1-based */
                        port = &cm_dev->port[p-1];
                        break;
                }
        }
        read_unlock_irqrestore(&cm.device_lock, flags);

        if (!port)
                return -EINVAL;

        ret = ib_find_cached_pkey(cm_dev->device, port->port_num,
                                  be16_to_cpu(path->pkey), &av->pkey_index);
        if (ret)
                return ret;

        av->port = port;
        cm_set_ah_attr(&av->ah_attr, av->port->port_num,
                       be16_to_cpu(path->dlid), path->sl,
                       be16_to_cpu(path->slid) & 0x7F);
        av->packet_life_time = path->packet_life_time;
        return 0;
}
305
/*
 * Assign a local communication ID (>= 1) to the cm_id and record the
 * mapping in cm.local_id_table.  If the idr runs out of preallocated
 * nodes (-EAGAIN), replenish with idr_pre_get() and retry; note that a
 * failed idr_pre_get() surfaces here as -EAGAIN rather than -ENOMEM.
 */
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;
        int ret;

        do {
                spin_lock_irqsave(&cm.lock, flags);
                ret = idr_get_new_above(&cm.local_id_table, cm_id_priv, 1,
                                        (__force int *) &cm_id_priv->id.local_id);
                spin_unlock_irqrestore(&cm.lock, flags);
        } while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );
        return ret;
}
319
/* Release a local communication ID from cm.local_id_table. */
static void cm_free_id(__be32 local_id)
{
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        idr_remove(&cm.local_id_table, (__force int) local_id);
        spin_unlock_irqrestore(&cm.lock, flags);
}
328
329 static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
330 {
331         struct cm_id_private *cm_id_priv;
332
333         cm_id_priv = idr_find(&cm.local_id_table, (__force int) local_id);
334         if (cm_id_priv) {
335                 if (cm_id_priv->id.remote_id == remote_id)
336                         atomic_inc(&cm_id_priv->refcount);
337                 else
338                         cm_id_priv = NULL;
339         }
340
341         return cm_id_priv;
342 }
343
/*
 * Locked wrapper around cm_get_id(): look up and reference a cm_id by
 * its local/remote comm ID pair under cm.lock.
 */
static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        cm_id_priv = cm_get_id(local_id, remote_id);
        spin_unlock_irqrestore(&cm.lock, flags);

        return cm_id_priv;
}
355
/*
 * Insert a listener into cm.listen_service_table, keyed by service ID.
 * If an existing entry's masked service ID overlaps the new one, the
 * insert is refused and the new cm_id_priv is returned to flag the
 * conflict; on successful insert NULL is returned.  Caller must hold
 * cm.lock (see ib_cm_listen()).
 */
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
        struct rb_node **link = &cm.listen_service_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        __be64 service_id = cm_id_priv->id.service_id;
        __be64 service_mask = cm_id_priv->id.service_mask;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          service_node);
                /* overlapping masked IDs -> an existing listener conflicts */
                if ((cur_cm_id_priv->id.service_mask & service_id) ==
                    (service_mask & cur_cm_id_priv->id.service_id))
                        return cm_id_priv;
                if (service_id < cur_cm_id_priv->id.service_id)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&cm_id_priv->service_node, parent, link);
        rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
        return NULL;
}
380
381 static struct cm_id_private * cm_find_listen(__be64 service_id)
382 {
383         struct rb_node *node = cm.listen_service_table.rb_node;
384         struct cm_id_private *cm_id_priv;
385
386         while (node) {
387                 cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
388                 if ((cm_id_priv->id.service_mask & service_id) ==
389                     (cm_id_priv->id.service_mask & cm_id_priv->id.service_id))
390                         return cm_id_priv;
391                 if (service_id < cm_id_priv->id.service_id)
392                         node = node->rb_left;
393                 else
394                         node = node->rb_right;
395         }
396         return NULL;
397 }
398
/*
 * Insert timewait state into cm.remote_id_table, keyed by the pair
 * (remote comm ID, remote CA GUID).  Returns the existing entry if one
 * with the same key is already present (a duplicate connection), or
 * NULL after a successful insert; inserted_remote_id is set so
 * cm_cleanup_timewait() knows to unlink it.
 */
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
                                                     *timewait_info)
{
        struct rb_node **link = &cm.remote_id_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_id = timewait_info->work.remote_id;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_id_node);
                if (remote_id < cur_timewait_info->work.remote_id)
                        link = &(*link)->rb_left;
                else if (remote_id > cur_timewait_info->work.remote_id)
                        link = &(*link)->rb_right;
                else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_left;
                else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_id = 1;
        rb_link_node(&timewait_info->remote_id_node, parent, link);
        rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
        return NULL;
}
428
429 static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
430                                                    __be32 remote_id)
431 {
432         struct rb_node *node = cm.remote_id_table.rb_node;
433         struct cm_timewait_info *timewait_info;
434
435         while (node) {
436                 timewait_info = rb_entry(node, struct cm_timewait_info,
437                                          remote_id_node);
438                 if (remote_id < timewait_info->work.remote_id)
439                         node = node->rb_left;
440                 else if (remote_id > timewait_info->work.remote_id)
441                         node = node->rb_right;
442                 else if (remote_ca_guid < timewait_info->remote_ca_guid)
443                         node = node->rb_left;
444                 else if (remote_ca_guid > timewait_info->remote_ca_guid)
445                         node = node->rb_right;
446                 else
447                         return timewait_info;
448         }
449         return NULL;
450 }
451
/*
 * Insert timewait state into cm.remote_qp_table, keyed by the pair
 * (remote QPN, remote CA GUID).  Returns the existing entry on a
 * duplicate key, or NULL after a successful insert;
 * inserted_remote_qp is set so cm_cleanup_timewait() can unlink it.
 */
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
                                                      *timewait_info)
{
        struct rb_node **link = &cm.remote_qp_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_qpn = timewait_info->remote_qpn;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_qp_node);
                if (remote_qpn < cur_timewait_info->remote_qpn)
                        link = &(*link)->rb_left;
                else if (remote_qpn > cur_timewait_info->remote_qpn)
                        link = &(*link)->rb_right;
                else if (remote_ca_guid < cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_left;
                else if (remote_ca_guid > cur_timewait_info->remote_ca_guid)
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_qp = 1;
        rb_link_node(&timewait_info->remote_qp_node, parent, link);
        rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
        return NULL;
}
481
/*
 * Insert a SIDR request's cm_id into cm.remote_sidr_table, keyed by
 * remote comm ID with the requester's port GID (av.dgid) breaking
 * ties.  Returns the existing entry on a duplicate key (same remote
 * ID and GID), or NULL after a successful insert.
 */
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
                                                    *cm_id_priv)
{
        struct rb_node **link = &cm.remote_sidr_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        union ib_gid *port_gid = &cm_id_priv->av.dgid;
        __be32 remote_id = cm_id_priv->id.remote_id;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          sidr_id_node);
                if (remote_id < cur_cm_id_priv->id.remote_id)
                        link = &(*link)->rb_left;
                else if (remote_id > cur_cm_id_priv->id.remote_id)
                        link = &(*link)->rb_right;
                else {
                        /* equal remote IDs: order by raw GID bytes */
                        int cmp;
                        cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
                                     sizeof *port_gid);
                        if (cmp < 0)
                                link = &(*link)->rb_left;
                        else if (cmp > 0)
                                link = &(*link)->rb_right;
                        else
                                return cur_cm_id_priv;
                }
        }
        rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
        rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        return NULL;
}
515
516 static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
517                                enum ib_cm_sidr_status status)
518 {
519         struct ib_cm_sidr_rep_param param;
520
521         memset(&param, 0, sizeof param);
522         param.status = status;
523         ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
524 }
525
526 struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
527                                  void *context)
528 {
529         struct cm_id_private *cm_id_priv;
530         int ret;
531
532         cm_id_priv = kmalloc(sizeof *cm_id_priv, GFP_KERNEL);
533         if (!cm_id_priv)
534                 return ERR_PTR(-ENOMEM);
535
536         memset(cm_id_priv, 0, sizeof *cm_id_priv);
537         cm_id_priv->id.state = IB_CM_IDLE;
538         cm_id_priv->id.cm_handler = cm_handler;
539         cm_id_priv->id.context = context;
540         cm_id_priv->id.remote_cm_qpn = 1;
541         ret = cm_alloc_id(cm_id_priv);
542         if (ret)
543                 goto error;
544
545         spin_lock_init(&cm_id_priv->lock);
546         init_waitqueue_head(&cm_id_priv->wait);
547         INIT_LIST_HEAD(&cm_id_priv->work_list);
548         atomic_set(&cm_id_priv->work_count, -1);
549         atomic_set(&cm_id_priv->refcount, 1);
550         return &cm_id_priv->id;
551
552 error:
553         kfree(cm_id_priv);
554         return ERR_PTR(-ENOMEM);
555 }
556 EXPORT_SYMBOL(ib_create_cm_id);
557
558 static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
559 {
560         struct cm_work *work;
561
562         if (list_empty(&cm_id_priv->work_list))
563                 return NULL;
564
565         work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
566         list_del(&work->list);
567         return work;
568 }
569
/*
 * Free a cm_work item, releasing the received MAD (if any) it still
 * holds a reference to.
 */
static void cm_free_work(struct cm_work *work)
{
        if (work->mad_recv_wc)
                ib_free_recv_mad(work->mad_recv_wc);
        kfree(work);
}
576
/*
 * Convert an IB time exponent (4.096us * 2^iba_time) to an approximate
 * number of milliseconds, never returning less than 1 ms.
 */
static inline int cm_convert_to_ms(int iba_time)
{
        int shift = iba_time - 8;       /* 2^8 * 4.096us ~= 1 ms */

        return shift > 0 ? 1 << shift : 1;
}
582
/*
 * Unlink timewait state from cm.remote_id_table and cm.remote_qp_table
 * (under cm.lock), clearing the inserted flags so a second call is a
 * no-op.  The unlocked early-exit check is safe because the flags are
 * only set/cleared while the node is being inserted or removed.
 */
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
        unsigned long flags;

        if (!timewait_info->inserted_remote_id &&
            !timewait_info->inserted_remote_qp)
            return;

        spin_lock_irqsave(&cm.lock, flags);
        if (timewait_info->inserted_remote_id) {
                rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
                timewait_info->inserted_remote_id = 0;
        }

        if (timewait_info->inserted_remote_qp) {
                rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
                timewait_info->inserted_remote_qp = 0;
        }
        spin_unlock_irqrestore(&cm.lock, flags);
}
603
/*
 * Allocate zeroed timewait state for a connection.  The embedded work
 * item is prepared to deliver an IB_CM_TIMEWAIT_EXIT event through
 * cm_work_handler() when later queued by cm_enter_timewait().
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
        struct cm_timewait_info *timewait_info;

        timewait_info = kmalloc(sizeof *timewait_info, GFP_KERNEL);
        if (!timewait_info)
                return ERR_PTR(-ENOMEM);
        memset(timewait_info, 0, sizeof *timewait_info);

        timewait_info->work.local_id = local_id;
        INIT_WORK(&timewait_info->work.work, cm_work_handler,
                  &timewait_info->work);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
}
619
/*
 * Move a connection into IB_CM_TIMEWAIT and schedule the timewait-exit
 * work after a delay derived from the local ack timeout.  Ownership of
 * timewait_info passes to the workqueue, so the cm_id's pointer is
 * cleared here.
 */
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
        int wait_time;

        /*
         * The cm_id could be destroyed by the user before we exit timewait.
         * To protect against this, we search for the cm_id after exiting
         * timewait before notifying the user that we've exited timewait.
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
        wait_time = cm_convert_to_ms(cm_id_priv->local_ack_timeout);
        queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
                           msecs_to_jiffies(wait_time));
        cm_id_priv->timewait_info = NULL;
}
635
636 static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
637 {
638         cm_id_priv->id.state = IB_CM_IDLE;
639         if (cm_id_priv->timewait_info) {
640                 cm_cleanup_timewait(cm_id_priv->timewait_info);
641                 kfree(cm_id_priv->timewait_info);
642                 cm_id_priv->timewait_info = NULL;
643         }
644 }
645
/*
 * ib_destroy_cm_id - tear down a communication identifier.
 *
 * Drives the connection to a destroyable state based on where it is in
 * the CM state machine (cancelling outstanding MADs, rejecting pending
 * requests, or issuing a DREQ for established connections), then waits
 * for all references to drop before freeing queued work, private data,
 * and the id itself.
 */
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;
        unsigned long flags;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id->state) {
        case IB_CM_LISTEN:
                cm_id->state = IB_CM_IDLE;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* remove from the listen table under the global lock */
                spin_lock_irqsave(&cm.lock, flags);
                rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
                spin_unlock_irqrestore(&cm.lock, flags);
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id->state = IB_CM_IDLE;
                ib_cancel_mad(cm_id_priv->av.port->mad_agent,
                              (unsigned long) cm_id_priv->msg);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        case IB_CM_SIDR_REQ_RCVD:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
                break;
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent,
                              (unsigned long) cm_id_priv->msg);
                /* Fall through */
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* abort the half-formed connection with a timeout REJ */
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
                               &cm_id_priv->av.port->cm_dev->ca_guid,
                               sizeof cm_id_priv->av.port->cm_dev->ca_guid,
                               NULL, 0);
                break;
        case IB_CM_ESTABLISHED:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* disconnect first, then re-run the switch in the new state */
                ib_send_cm_dreq(cm_id, NULL, 0);
                goto retest;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent,
                              (unsigned long) cm_id_priv->msg);
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        case IB_CM_DREQ_RCVD:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        default:
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                break;
        }

        cm_free_id(cm_id->local_id);
        /* drop our reference and wait until everyone else has too */
        atomic_dec(&cm_id_priv->refcount);
        wait_event(cm_id_priv->wait, !atomic_read(&cm_id_priv->refcount));
        while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
                cm_free_work(work);
        if (cm_id_priv->private_data && cm_id_priv->private_data_len)
                kfree(cm_id_priv->private_data);
        kfree(cm_id_priv);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
719
/*
 * ib_cm_listen - begin listening for connection requests.
 * @cm_id: an idle communication identifier.
 * @service_id: the service to listen on; IB_CM_ASSIGN_SERVICE_ID asks
 *      the CM to pick the next free local service ID.
 * @service_mask: bits of incoming service IDs that must match
 *      @service_id; 0 means exact match (mask of all ones).
 *
 * Returns -EINVAL for a malformed service ID, -EBUSY if an existing
 * listener's masked range overlaps, 0 on success.
 */
int ib_cm_listen(struct ib_cm_id *cm_id,
                 __be64 service_id,
                 __be64 service_mask)
{
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        unsigned long flags;
        int ret = 0;

        service_mask = service_mask ? service_mask :
                       __constant_cpu_to_be64(~0ULL);
        service_id &= service_mask;
        if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
            (service_id != IB_CM_ASSIGN_SERVICE_ID))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        BUG_ON(cm_id->state != IB_CM_IDLE);

        cm_id->state = IB_CM_LISTEN;

        spin_lock_irqsave(&cm.lock, flags);
        if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
                cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
                cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
        } else {
                cm_id->service_id = service_id;
                cm_id->service_mask = service_mask;
        }
        cur_cm_id_priv = cm_insert_listen(cm_id_priv);
        spin_unlock_irqrestore(&cm.lock, flags);

        if (cur_cm_id_priv) {
                /* an overlapping listener exists; undo the state change */
                cm_id->state = IB_CM_IDLE;
                ret = -EBUSY;
        }
        return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
758
759 static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
760                           enum cm_msg_sequence msg_seq)
761 {
762         u64 hi_tid, low_tid;
763
764         hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
765         low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
766                           (msg_seq << 30));
767         return cpu_to_be64(hi_tid | low_tid);
768 }
769
/*
 * Fill in the common MAD header for an outgoing CM message: CM
 * management class, SEND method, and the caller-supplied attribute ID
 * and transaction ID (see cm_form_tid()).
 */
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
                              __be16 attr_id, __be64 tid)
{
        hdr->base_version  = IB_MGMT_BASE_VERSION;
        hdr->mgmt_class    = IB_MGMT_CLASS_CM;
        hdr->class_version = IB_CM_CLASS_VERSION;
        hdr->method        = IB_MGMT_METHOD_SEND;
        hdr->attr_id       = attr_id;
        hdr->tid           = tid;
}
780
781 static void cm_format_req(struct cm_req_msg *req_msg,
782                           struct cm_id_private *cm_id_priv,
783                           struct ib_cm_req_param *param)
784 {
785         cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
786                           cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
787
788         req_msg->local_comm_id = cm_id_priv->id.local_id;
789         req_msg->service_id = param->service_id;
790         req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
791         cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
792         cm_req_set_resp_res(req_msg, param->responder_resources);
793         cm_req_set_init_depth(req_msg, param->initiator_depth);
794         cm_req_set_remote_resp_timeout(req_msg,
795                                        param->remote_cm_response_timeout);
796         cm_req_set_qp_type(req_msg, param->qp_type);
797         cm_req_set_flow_ctrl(req_msg, param->flow_control);
798         cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
799         cm_req_set_local_resp_timeout(req_msg,
800                                       param->local_cm_response_timeout);
801         cm_req_set_retry_count(req_msg, param->retry_count);
802         req_msg->pkey = param->primary_path->pkey;
803         cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
804         cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
805         cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
806         cm_req_set_srq(req_msg, param->srq);
807
808         req_msg->primary_local_lid = param->primary_path->slid;
809         req_msg->primary_remote_lid = param->primary_path->dlid;
810         req_msg->primary_local_gid = param->primary_path->sgid;
811         req_msg->primary_remote_gid = param->primary_path->dgid;
812         cm_req_set_primary_flow_label(req_msg, param->primary_path->flow_label);
813         cm_req_set_primary_packet_rate(req_msg, param->primary_path->rate);
814         req_msg->primary_traffic_class = param->primary_path->traffic_class;
815         req_msg->primary_hop_limit = param->primary_path->hop_limit;
816         cm_req_set_primary_sl(req_msg, param->primary_path->sl);
817         cm_req_set_primary_subnet_local(req_msg, 1); /* local only... */
818         cm_req_set_primary_local_ack_timeout(req_msg,
819                 min(31, param->primary_path->packet_life_time + 1));
820
821         if (param->alternate_path) {
822                 req_msg->alt_local_lid = param->alternate_path->slid;
823                 req_msg->alt_remote_lid = param->alternate_path->dlid;
824                 req_msg->alt_local_gid = param->alternate_path->sgid;
825                 req_msg->alt_remote_gid = param->alternate_path->dgid;
826                 cm_req_set_alt_flow_label(req_msg,
827                                           param->alternate_path->flow_label);
828                 cm_req_set_alt_packet_rate(req_msg, param->alternate_path->rate);
829                 req_msg->alt_traffic_class = param->alternate_path->traffic_class;
830                 req_msg->alt_hop_limit = param->alternate_path->hop_limit;
831                 cm_req_set_alt_sl(req_msg, param->alternate_path->sl);
832                 cm_req_set_alt_subnet_local(req_msg, 1); /* local only... */
833                 cm_req_set_alt_local_ack_timeout(req_msg,
834                         min(31, param->alternate_path->packet_life_time + 1));
835         }
836
837         if (param->private_data && param->private_data_len)
838                 memcpy(req_msg->private_data, param->private_data,
839                        param->private_data_len);
840 }
841
842 static inline int cm_validate_req_param(struct ib_cm_req_param *param)
843 {
844         /* peer-to-peer not supported */
845         if (param->peer_to_peer)
846                 return -EINVAL;
847
848         if (!param->primary_path)
849                 return -EINVAL;
850
851         if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
852                 return -EINVAL;
853
854         if (param->private_data &&
855             param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
856                 return -EINVAL;
857
858         if (param->alternate_path &&
859             (param->alternate_path->pkey != param->primary_path->pkey ||
860              param->alternate_path->mtu != param->primary_path->mtu))
861                 return -EINVAL;
862
863         return 0;
864 }
865
866 int ib_send_cm_req(struct ib_cm_id *cm_id,
867                    struct ib_cm_req_param *param)
868 {
869         struct cm_id_private *cm_id_priv;
870         struct ib_send_wr *bad_send_wr;
871         struct cm_req_msg *req_msg;
872         unsigned long flags;
873         int ret;
874
875         ret = cm_validate_req_param(param);
876         if (ret)
877                 return ret;
878
879         /* Verify that we're not in timewait. */
880         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
881         spin_lock_irqsave(&cm_id_priv->lock, flags);
882         if (cm_id->state != IB_CM_IDLE) {
883                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
884                 ret = -EINVAL;
885                 goto out;
886         }
887         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
888
889         cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
890                                                             id.local_id);
891         if (IS_ERR(cm_id_priv->timewait_info))
892                 goto out;
893
894         ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
895         if (ret)
896                 goto error1;
897         if (param->alternate_path) {
898                 ret = cm_init_av_by_path(param->alternate_path,
899                                          &cm_id_priv->alt_av);
900                 if (ret)
901                         goto error1;
902         }
903         cm_id->service_id = param->service_id;
904         cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
905         cm_id_priv->timeout_ms = cm_convert_to_ms(
906                                     param->primary_path->packet_life_time) * 2 +
907                                  cm_convert_to_ms(
908                                     param->remote_cm_response_timeout);
909         cm_id_priv->max_cm_retries = param->max_cm_retries;
910         cm_id_priv->initiator_depth = param->initiator_depth;
911         cm_id_priv->responder_resources = param->responder_resources;
912         cm_id_priv->retry_count = param->retry_count;
913         cm_id_priv->path_mtu = param->primary_path->mtu;
914
915         ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
916         if (ret)
917                 goto error1;
918
919         req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
920         cm_format_req(req_msg, cm_id_priv, param);
921         cm_id_priv->tid = req_msg->hdr.tid;
922         cm_id_priv->msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
923         cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
924
925         cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
926         cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
927         cm_id_priv->local_ack_timeout =
928                                 cm_req_get_primary_local_ack_timeout(req_msg);
929
930         spin_lock_irqsave(&cm_id_priv->lock, flags);
931         ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
932                                 &cm_id_priv->msg->send_wr, &bad_send_wr);
933         if (ret) {
934                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
935                 goto error2;
936         }
937         BUG_ON(cm_id->state != IB_CM_IDLE);
938         cm_id->state = IB_CM_REQ_SENT;
939         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
940         return 0;
941
942 error2: cm_free_msg(cm_id_priv->msg);
943 error1: kfree(cm_id_priv->timewait_info);
944 out:    return ret;
945 }
946 EXPORT_SYMBOL(ib_send_cm_req);
947
948 static int cm_issue_rej(struct cm_port *port,
949                         struct ib_mad_recv_wc *mad_recv_wc,
950                         enum ib_cm_rej_reason reason,
951                         enum cm_msg_response msg_rejected,
952                         void *ari, u8 ari_length)
953 {
954         struct ib_mad_send_buf *msg = NULL;
955         struct ib_send_wr *bad_send_wr;
956         struct cm_rej_msg *rej_msg, *rcv_msg;
957         int ret;
958
959         ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
960         if (ret)
961                 return ret;
962
963         /* We just need common CM header information.  Cast to any message. */
964         rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
965         rej_msg = (struct cm_rej_msg *) msg->mad;
966
967         cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
968         rej_msg->remote_comm_id = rcv_msg->local_comm_id;
969         rej_msg->local_comm_id = rcv_msg->remote_comm_id;
970         cm_rej_set_msg_rejected(rej_msg, msg_rejected);
971         rej_msg->reason = cpu_to_be16(reason);
972
973         if (ari && ari_length) {
974                 cm_rej_set_reject_info_len(rej_msg, ari_length);
975                 memcpy(rej_msg->ari, ari, ari_length);
976         }
977
978         ret = ib_post_send_mad(port->mad_agent, &msg->send_wr, &bad_send_wr);
979         if (ret)
980                 cm_free_msg(msg);
981
982         return ret;
983 }
984
985 static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
986                                     __be32 local_qpn, __be32 remote_qpn)
987 {
988         return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
989                 ((local_ca_guid == remote_ca_guid) &&
990                  (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
991 }
992
993 static inline void cm_format_paths_from_req(struct cm_req_msg *req_msg,
994                                             struct ib_sa_path_rec *primary_path,
995                                             struct ib_sa_path_rec *alt_path)
996 {
997         memset(primary_path, 0, sizeof *primary_path);
998         primary_path->dgid = req_msg->primary_local_gid;
999         primary_path->sgid = req_msg->primary_remote_gid;
1000         primary_path->dlid = req_msg->primary_local_lid;
1001         primary_path->slid = req_msg->primary_remote_lid;
1002         primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
1003         primary_path->hop_limit = req_msg->primary_hop_limit;
1004         primary_path->traffic_class = req_msg->primary_traffic_class;
1005         primary_path->reversible = 1;
1006         primary_path->pkey = req_msg->pkey;
1007         primary_path->sl = cm_req_get_primary_sl(req_msg);
1008         primary_path->mtu_selector = IB_SA_EQ;
1009         primary_path->mtu = cm_req_get_path_mtu(req_msg);
1010         primary_path->rate_selector = IB_SA_EQ;
1011         primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
1012         primary_path->packet_life_time_selector = IB_SA_EQ;
1013         primary_path->packet_life_time =
1014                 cm_req_get_primary_local_ack_timeout(req_msg);
1015         primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1016
1017         if (req_msg->alt_local_lid) {
1018                 memset(alt_path, 0, sizeof *alt_path);
1019                 alt_path->dgid = req_msg->alt_local_gid;
1020                 alt_path->sgid = req_msg->alt_remote_gid;
1021                 alt_path->dlid = req_msg->alt_local_lid;
1022                 alt_path->slid = req_msg->alt_remote_lid;
1023                 alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
1024                 alt_path->hop_limit = req_msg->alt_hop_limit;
1025                 alt_path->traffic_class = req_msg->alt_traffic_class;
1026                 alt_path->reversible = 1;
1027                 alt_path->pkey = req_msg->pkey;
1028                 alt_path->sl = cm_req_get_alt_sl(req_msg);
1029                 alt_path->mtu_selector = IB_SA_EQ;
1030                 alt_path->mtu = cm_req_get_path_mtu(req_msg);
1031                 alt_path->rate_selector = IB_SA_EQ;
1032                 alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
1033                 alt_path->packet_life_time_selector = IB_SA_EQ;
1034                 alt_path->packet_life_time =
1035                         cm_req_get_alt_local_ack_timeout(req_msg);
1036                 alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1037         }
1038 }
1039
1040 static void cm_format_req_event(struct cm_work *work,
1041                                 struct cm_id_private *cm_id_priv,
1042                                 struct ib_cm_id *listen_id)
1043 {
1044         struct cm_req_msg *req_msg;
1045         struct ib_cm_req_event_param *param;
1046
1047         req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1048         param = &work->cm_event.param.req_rcvd;
1049         param->listen_id = listen_id;
1050         param->device = cm_id_priv->av.port->mad_agent->device;
1051         param->port = cm_id_priv->av.port->port_num;
1052         param->primary_path = &work->path[0];
1053         if (req_msg->alt_local_lid)
1054                 param->alternate_path = &work->path[1];
1055         else
1056                 param->alternate_path = NULL;
1057         param->remote_ca_guid = req_msg->local_ca_guid;
1058         param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
1059         param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
1060         param->qp_type = cm_req_get_qp_type(req_msg);
1061         param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
1062         param->responder_resources = cm_req_get_init_depth(req_msg);
1063         param->initiator_depth = cm_req_get_resp_res(req_msg);
1064         param->local_cm_response_timeout =
1065                                         cm_req_get_remote_resp_timeout(req_msg);
1066         param->flow_control = cm_req_get_flow_ctrl(req_msg);
1067         param->remote_cm_response_timeout =
1068                                         cm_req_get_local_resp_timeout(req_msg);
1069         param->retry_count = cm_req_get_retry_count(req_msg);
1070         param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1071         param->srq = cm_req_get_srq(req_msg);
1072         work->cm_event.private_data = &req_msg->private_data;
1073 }
1074
/*
 * Deliver one or more queued events to the consumer's cm_handler.
 *
 * The current event is reported first; any further events queued on
 * the cm_id (tracked via work_count) are then drained one at a time.
 * The lock is held only while dequeuing, never across the callback.
 * A nonzero handler return destroys the cm_id.
 */
static void cm_process_work(struct cm_id_private *cm_id_priv,
			    struct cm_work *work)
{
	unsigned long flags;
	int ret;

	/* We will typically only have the current event to report. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
	cm_free_work(work);

	/* Drain additional queued events; work_count going negative means
	 * the queue is empty.  Stop early if the handler returned nonzero. */
	while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
		spin_lock_irqsave(&cm_id_priv->lock, flags);
		work = cm_dequeue_work(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		/* A positive work_count guarantees a queued entry exists. */
		BUG_ON(!work);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
						&work->cm_event);
		cm_free_work(work);
	}
	cm_deref_id(cm_id_priv);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
}
1098
1099 static void cm_format_mra(struct cm_mra_msg *mra_msg,
1100                           struct cm_id_private *cm_id_priv,
1101                           enum cm_msg_response msg_mraed, u8 service_timeout,
1102                           const void *private_data, u8 private_data_len)
1103 {
1104         cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1105         cm_mra_set_msg_mraed(mra_msg, msg_mraed);
1106         mra_msg->local_comm_id = cm_id_priv->id.local_id;
1107         mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
1108         cm_mra_set_service_timeout(mra_msg, service_timeout);
1109
1110         if (private_data && private_data_len)
1111                 memcpy(mra_msg->private_data, private_data, private_data_len);
1112 }
1113
1114 static void cm_format_rej(struct cm_rej_msg *rej_msg,
1115                           struct cm_id_private *cm_id_priv,
1116                           enum ib_cm_rej_reason reason,
1117                           void *ari,
1118                           u8 ari_length,
1119                           const void *private_data,
1120                           u8 private_data_len)
1121 {
1122         cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1123         rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
1124
1125         switch(cm_id_priv->id.state) {
1126         case IB_CM_REQ_RCVD:
1127                 rej_msg->local_comm_id = 0;
1128                 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1129                 break;
1130         case IB_CM_MRA_REQ_SENT:
1131                 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1132                 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1133                 break;
1134         case IB_CM_REP_RCVD:
1135         case IB_CM_MRA_REP_SENT:
1136                 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1137                 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
1138                 break;
1139         default:
1140                 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1141                 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
1142                 break;
1143         }
1144
1145         rej_msg->reason = cpu_to_be16(reason);
1146         if (ari && ari_length) {
1147                 cm_rej_set_reject_info_len(rej_msg, ari_length);
1148                 memcpy(rej_msg->ari, ari, ari_length);
1149         }
1150
1151         if (private_data && private_data_len)
1152                 memcpy(rej_msg->private_data, private_data, private_data_len);
1153 }
1154
/*
 * Respond to a duplicate REQ from the peer, which typically means our
 * earlier response was lost.  Depending on our state we resend an MRA,
 * reject a stale connection, or silently drop the message.
 */
static void cm_dup_req_handler(struct cm_work *work,
			       struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	/* Quick state check to discard duplicate REQs. */
	if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		return;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_MRA_REQ_SENT:
		/* Resend the MRA the peer apparently missed. */
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		break;
	case IB_CM_TIMEWAIT:
		/* Connection is winding down: the repeated REQ is stale. */
		cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
			      IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
		break;
	default:
		/* Any other state: drop the duplicate and free the msg. */
		goto unlock;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
			       &bad_send_wr);
	if (ret)
		goto free;
	return;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:   cm_free_msg(msg);
}
1197
/*
 * Match an incoming REQ against a local listener and screen out
 * duplicate REQs and stale connections.
 *
 * On success, returns the listening cm_id_private with an extra
 * reference held, and moves cm_id_priv to IB_CM_REQ_RCVD.  Returns
 * NULL when the REQ is a duplicate, stale, or unmatched; in those
 * cases the timewait info is cleaned up and a REJ may be issued.
 */
static struct cm_id_private * cm_match_req(struct cm_work *work,
					   struct cm_id_private *cm_id_priv)
{
	struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
	struct cm_timewait_info *timewait_info;
	struct cm_req_msg *req_msg;
	unsigned long flags;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Check for duplicate REQ and stale connections. */
	spin_lock_irqsave(&cm.lock, flags);
	timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
	if (!timewait_info)
		timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);

	if (timewait_info) {
		/* An entry for this remote id or qpn already exists. */
		cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
					   timewait_info->work.remote_id);
		spin_unlock_irqrestore(&cm.lock, flags);
		if (cur_cm_id_priv) {
			/* A live cm_id exists: treat as a duplicate REQ. */
			cm_dup_req_handler(work, cur_cm_id_priv);
			cm_deref_id(cur_cm_id_priv);
		} else
			/* Only timewait state remains: the REQ is stale. */
			cm_issue_rej(work->port, work->mad_recv_wc,
				     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
				     NULL, 0);
		goto error;
	}

	/* Find matching listen request. */
	listen_cm_id_priv = cm_find_listen(req_msg->service_id);
	if (!listen_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
			     NULL, 0);
		goto error;
	}
	/* Hold references on both ids while the REQ event is processed. */
	atomic_inc(&listen_cm_id_priv->refcount);
	atomic_inc(&cm_id_priv->refcount);
	cm_id_priv->id.state = IB_CM_REQ_RCVD;
	atomic_inc(&cm_id_priv->work_count);
	spin_unlock_irqrestore(&cm.lock, flags);
	return listen_cm_id_priv;

error:  cm_cleanup_timewait(cm_id_priv->timewait_info);
	return NULL;
}
1247
/*
 * Handle a newly received REQ: create a passive-side cm_id, match it
 * to a listener, build path records, and deliver the REQ event to the
 * listener's callback via cm_process_work().
 *
 * Returns 0 on success or a negative errno; on failure the new cm_id
 * and its timewait info are torn down.
 */
static int cm_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
	struct cm_req_msg *req_msg;
	int ret;

	req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;

	/* Create the passive-side id; handler and context are inherited
	 * from the listener after matching below. */
	cm_id = ib_create_cm_id(NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	cm_id_priv->id.remote_id = req_msg->local_comm_id;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				&cm_id_priv->av);
	cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
							    id.local_id);
	if (IS_ERR(cm_id_priv->timewait_info)) {
		ret = PTR_ERR(cm_id_priv->timewait_info);
		goto error1;
	}
	cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);

	listen_cm_id_priv = cm_match_req(work, cm_id_priv);
	if (!listen_cm_id_priv) {
		ret = -EINVAL;
		goto error2;
	}

	/* Inherit the listener's callback and context. */
	cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = listen_cm_id_priv->id.context;
	cm_id_priv->id.service_id = req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
	if (ret)
		goto error3;
	if (req_msg->alt_local_lid) {
		ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
		if (ret)
			goto error3;
	}
	cm_id_priv->tid = req_msg->hdr.tid;
	cm_id_priv->timeout_ms = cm_convert_to_ms(
					cm_req_get_local_resp_timeout(req_msg));
	cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
	/* "local" values in the message belong to the peer; depth and
	 * resources swap perspective on this side. */
	cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
	cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
	cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
	cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
	cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
	cm_id_priv->local_ack_timeout =
				cm_req_get_primary_local_ack_timeout(req_msg);
	cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
	cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);

	cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(listen_cm_id_priv);
	return 0;

error3: atomic_dec(&cm_id_priv->refcount);
	cm_deref_id(listen_cm_id_priv);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
error2: kfree(cm_id_priv->timewait_info);
	cm_id_priv->timewait_info = NULL;
error1: ib_destroy_cm_id(&cm_id_priv->id);
	return ret;
}
1322
1323 static void cm_format_rep(struct cm_rep_msg *rep_msg,
1324                           struct cm_id_private *cm_id_priv,
1325                           struct ib_cm_rep_param *param)
1326 {
1327         cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
1328         rep_msg->local_comm_id = cm_id_priv->id.local_id;
1329         rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1330         cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
1331         cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
1332         rep_msg->resp_resources = param->responder_resources;
1333         rep_msg->initiator_depth = param->initiator_depth;
1334         cm_rep_set_target_ack_delay(rep_msg, param->target_ack_delay);
1335         cm_rep_set_failover(rep_msg, param->failover_accepted);
1336         cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1337         cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
1338         cm_rep_set_srq(rep_msg, param->srq);
1339         rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
1340
1341         if (param->private_data && param->private_data_len)
1342                 memcpy(rep_msg->private_data, param->private_data,
1343                        param->private_data_len);
1344 }
1345
/*
 * Send a REP accepting a previously received connection REQ.
 *
 * Valid only in the IB_CM_REQ_RCVD or IB_CM_MRA_REQ_SENT states; on
 * success the cm_id transitions to IB_CM_REP_SENT.  Returns 0 or a
 * negative errno.
 */
int ib_send_cm_rep(struct ib_cm_id *cm_id,
		   struct ib_cm_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct cm_rep_msg *rep_msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if (param->private_data &&
	    param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REQ_RCVD &&
	    cm_id->state != IB_CM_MRA_REQ_SENT) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	rep_msg = (struct cm_rep_msg *) msg->mad;
	cm_format_rep(rep_msg, cm_id_priv, param);
	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	/* Record the state this send was issued in; presumably consumed by
	 * the send completion path (not visible here). */
	msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_REP_SENT;
	cm_id_priv->msg = msg;
	cm_id_priv->initiator_depth = param->initiator_depth;
	cm_id_priv->responder_resources = param->responder_resources;
	cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
1396
1397 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
1398                           struct cm_id_private *cm_id_priv,
1399                           const void *private_data,
1400                           u8 private_data_len)
1401 {
1402         cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
1403         rtu_msg->local_comm_id = cm_id_priv->id.local_id;
1404         rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
1405
1406         if (private_data && private_data_len)
1407                 memcpy(rtu_msg->private_data, private_data, private_data_len);
1408 }
1409
/*
 * Send an RTU, completing connection establishment on the active side.
 *
 * Valid only in IB_CM_REP_RCVD or IB_CM_MRA_REP_SENT; on success the
 * cm_id transitions to IB_CM_ESTABLISHED and the private data is kept
 * on the cm_id.  Returns 0 or a negative errno.
 */
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
		return -EINVAL;

	/* Copy the private data up front; freed on every failure path. */
	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_REP_RCVD &&
	    cm_id->state != IB_CM_MRA_REP_SENT) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
		      private_data, private_data_len);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		kfree(data);
		return ret;
	}

	cm_id->state = IB_CM_ESTABLISHED;
	/* Ownership of 'data' passes to the cm_id on success. */
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;
}
1461 EXPORT_SYMBOL(ib_send_cm_rtu);
1462
1463 static void cm_format_rep_event(struct cm_work *work)
1464 {
1465         struct cm_rep_msg *rep_msg;
1466         struct ib_cm_rep_event_param *param;
1467
1468         rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1469         param = &work->cm_event.param.rep_rcvd;
1470         param->remote_ca_guid = rep_msg->local_ca_guid;
1471         param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
1472         param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
1473         param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
1474         param->responder_resources = rep_msg->initiator_depth;
1475         param->initiator_depth = rep_msg->resp_resources;
1476         param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1477         param->failover_accepted = cm_rep_get_failover(rep_msg);
1478         param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
1479         param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1480         param->srq = cm_rep_get_srq(rep_msg);
1481         work->cm_event.private_data = &rep_msg->private_data;
1482 }
1483
/*
 * Respond to a duplicate REP, which indicates the peer missed our
 * earlier RTU or MRA.  Resends the appropriate response based on the
 * current connection state, or silently drops the message.
 */
static void cm_dup_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	/* Look up the connection by both comm IDs (swapped perspective). */
	rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
				   rep_msg->local_comm_id);
	if (!cm_id_priv)
		return;

	ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
	if (ret)
		goto deref;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
		/* Already established: resend the RTU the peer missed. */
		cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
		/* Still processing the REP: resend the MRA. */
		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
	else
		/* Any other state: drop the duplicate. */
		goto unlock;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
			       &bad_send_wr);
	if (ret)
		goto free;
	goto deref;

unlock: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
free:   cm_free_msg(msg);
deref:  cm_deref_id(cm_id_priv);
}
1527
/*
 * Handle a received REP on the active (REQ-sending) side.  Records the
 * peer's identifiers in the timewait info, screens out duplicate REPs and
 * stale connections via the shared remote-id/remote-qpn tables, then moves
 * the id to IB_CM_REP_RCVD and queues the event for the consumer.
 */
static int cm_rep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rep_msg *rep_msg;
	unsigned long flags;
	int ret;

	rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
	/* Look up by our comm ID alone; the remote's ID is not known yet. */
	cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
	if (!cm_id_priv) {
		/* No pending id: treat as a retransmit and re-ack it. */
		cm_dup_rep_handler(work);
		return -EINVAL;
	}

	cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
	cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
	cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);

	spin_lock_irqsave(&cm.lock, flags);
	/* Check for duplicate REP. */
	if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm.lock, flags);
		ret = -EINVAL;
		goto error;
	}
	/* Check for a stale connection. */
	if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
		spin_unlock_irqrestore(&cm.lock, flags);
		cm_issue_rej(work->port, work->mad_recv_wc,
			     IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
			     NULL, 0);
		ret = -EINVAL;
		goto error;
	}
	spin_unlock_irqrestore(&cm.lock, flags);

	cm_format_rep_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto error;
	}
	cm_id_priv->id.state = IB_CM_REP_RCVD;
	cm_id_priv->id.remote_id = rep_msg->local_comm_id;
	cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
	/* Resource limits are mirrored relative to the sender's view. */
	cm_id_priv->initiator_depth = rep_msg->resp_resources;
	cm_id_priv->responder_resources = rep_msg->initiator_depth;
	cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
	cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);

	/* todo: handle peer_to_peer */

	/* A response arrived; stop retransmitting the REQ. */
	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	/*
	 * Process the event now if no handler is active for this id;
	 * otherwise queue it behind the work already in progress.
	 */
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

error:	cm_cleanup_timewait(cm_id_priv->timewait_info);
	cm_deref_id(cm_id_priv);
	return ret;
}
1603
1604 static int cm_establish_handler(struct cm_work *work)
1605 {
1606         struct cm_id_private *cm_id_priv;
1607         unsigned long flags;
1608         int ret;
1609
1610         /* See comment in ib_cm_establish about lookup. */
1611         cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
1612         if (!cm_id_priv)
1613                 return -EINVAL;
1614
1615         spin_lock_irqsave(&cm_id_priv->lock, flags);
1616         if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
1617                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1618                 goto out;
1619         }
1620
1621         ib_cancel_mad(cm_id_priv->av.port->mad_agent,
1622                       (unsigned long) cm_id_priv->msg);
1623         ret = atomic_inc_and_test(&cm_id_priv->work_count);
1624         if (!ret)
1625                 list_add_tail(&work->list, &cm_id_priv->work_list);
1626         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1627
1628         if (ret)
1629                 cm_process_work(cm_id_priv, work);
1630         else
1631                 cm_deref_id(cm_id_priv);
1632         return 0;
1633 out:
1634         cm_deref_id(cm_id_priv);
1635         return -EINVAL;
1636 }
1637
/*
 * Handle a received RTU on the passive side: the peer acknowledged our
 * REP, so the connection becomes established.  Stops REP retransmission
 * and queues the event for the consumer.
 */
static int cm_rtu_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rtu_msg *rtu_msg;
	unsigned long flags;
	int ret;

	rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
				   rtu_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &rtu_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/* Only expected after we sent a REP (possibly MRA'd by the peer). */
	if (cm_id_priv->id.state != IB_CM_REP_SENT &&
	    cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_ESTABLISHED;

	/* Stop retransmitting the REP. */
	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	/* Run the event now, or queue it behind a handler in progress. */
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
1677
1678 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
1679                           struct cm_id_private *cm_id_priv,
1680                           const void *private_data,
1681                           u8 private_data_len)
1682 {
1683         cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
1684                           cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
1685         dreq_msg->local_comm_id = cm_id_priv->id.local_id;
1686         dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
1687         cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
1688
1689         if (private_data && private_data_len)
1690                 memcpy(dreq_msg->private_data, private_data, private_data_len);
1691 }
1692
/*
 * Send a DREQ to start disconnecting an established connection.  Moves
 * the cm_id to IB_CM_DREQ_SENT on success.  If the DREQ cannot be
 * allocated or posted, the id is placed directly into timewait so local
 * teardown still completes even though the peer was not notified.
 */
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/* A disconnect request is only meaningful once established. */
	if (cm_id->state != IB_CM_ESTABLISHED) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		goto out;
	}

	cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);
	/* Retransmit until answered; context[1] records the posted state. */
	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		cm_enter_timewait(cm_id_priv);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->state = IB_CM_DREQ_SENT;
	cm_id_priv->msg = msg;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
1739
1740 static void cm_format_drep(struct cm_drep_msg *drep_msg,
1741                           struct cm_id_private *cm_id_priv,
1742                           const void *private_data,
1743                           u8 private_data_len)
1744 {
1745         cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
1746         drep_msg->local_comm_id = cm_id_priv->id.local_id;
1747         drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1748
1749         if (private_data && private_data_len)
1750                 memcpy(drep_msg->private_data, private_data, private_data_len);
1751 }
1752
/*
 * Send a DREP in response to a received DREQ.  The id enters timewait
 * unconditionally; the private data is copied and stashed on the id first
 * so that replies to retransmitted DREQs (see cm_dreq_handler) can reuse
 * it.  Failure to allocate or post the DREP does not undo the transition.
 */
int ib_send_cm_drep(struct ib_cm_id *cm_id,
		    const void *private_data,
		    u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	void *data;
	int ret;

	if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	/* Copy before taking the lock; cm_copy_private_data may allocate. */
	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		kfree(data);
		return -EINVAL;
	}

	/* The id now owns the copied data; it is freed with the id. */
	cm_set_private_data(cm_id_priv, data, private_data_len);
	cm_enter_timewait(cm_id_priv);

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
		       private_data, private_data_len);

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent, &msg->send_wr,
			       &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
1801
/*
 * Handle a received DREQ.  Depending on connection state this either
 * queues a disconnect event for the consumer, or (in timewait) directly
 * re-sends a DREP for what must be a retransmitted DREQ.
 */
static int cm_dreq_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_dreq_msg *dreq_msg;
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
				   dreq_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &dreq_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/* The DREQ must reference the QPN this connection is using. */
	if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
		goto unlock;

	switch (cm_id_priv->id.state) {
	case IB_CM_REP_SENT:
	case IB_CM_DREQ_SENT:
		/* One of our messages is still retransmitting; stop it. */
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		break;
	case IB_CM_ESTABLISHED:
	case IB_CM_MRA_REP_RCVD:
		break;
	case IB_CM_TIMEWAIT:
		/*
		 * Already disconnected: answer the retransmitted DREQ with
		 * a DREP directly instead of reporting a new event.  The
		 * lock is dropped before posting the send.
		 */
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
			       cm_id_priv->private_data,
			       cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				     &msg->send_wr, &bad_send_wr))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}
	cm_id_priv->id.state = IB_CM_DREQ_RCVD;
	/* Remember the DREQ's TID so the DREP reply can reuse it. */
	cm_id_priv->tid = dreq_msg->hdr.tid;
	/* Run the event now, or queue it behind a handler in progress. */
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
1865
/*
 * Handle a received DREP: the disconnect handshake is complete, so stop
 * retransmitting our DREQ, enter timewait, and queue the event.
 * NOTE(review): IB_CM_DREQ_RCVD is also accepted here — presumably for
 * crossing disconnects where both sides sent a DREQ; confirm against the
 * IBA CM state tables.
 */
static int cm_drep_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_drep_msg *drep_msg;
	unsigned long flags;
	int ret;

	drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
				   drep_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &drep_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
	    cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_enter_timewait(cm_id_priv);

	/* Stop retransmitting the DREQ. */
	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	/* Run the event now, or queue it behind a handler in progress. */
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
1905
/*
 * Send a REJ to abort connection establishment.  Valid only during the
 * REQ/REP exchange: from pre-REP-sent states the id is reset to idle,
 * from post-REP-sent states it enters timewait.  Note the local state
 * transition is performed even if the REJ cannot be allocated or posted.
 */
int ib_send_cm_rej(struct ib_cm_id *cm_id,
		   enum ib_cm_rej_reason reason,
		   void *ari,
		   u8 ari_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
	    (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id->state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
		/* No REP of ours is outstanding: fall back to idle. */
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		/* We already sent a REP: the id must pass through timewait. */
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (!ret)
			cm_format_rej((struct cm_rej_msg *) msg->mad,
				      cm_id_priv, reason, ari, ari_length,
				      private_data, private_data_len);

		cm_enter_timewait(cm_id_priv);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	/* Allocation failed; the state change above still stands. */
	if (ret)
		goto out;

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret)
		cm_free_msg(msg);

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
1968
1969 static void cm_format_rej_event(struct cm_work *work)
1970 {
1971         struct cm_rej_msg *rej_msg;
1972         struct ib_cm_rej_event_param *param;
1973
1974         rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
1975         param = &work->cm_event.param.rej_rcvd;
1976         param->ari = rej_msg->ari;
1977         param->ari_length = cm_rej_get_reject_info_len(rej_msg);
1978         param->reason = __be16_to_cpu(rej_msg->reason);
1979         work->cm_event.private_data = &rej_msg->private_data;
1980 }
1981
/*
 * Locate and take a reference on the cm_id that a received REJ applies
 * to.  A timeout REJ is resolved through the remote-id table, keyed by
 * the 64-bit value at the start of the ARI plus the sender's comm ID
 * (presumably because the rejecting node may not know our comm ID in
 * that case — confirm against the IBA CM spec).  A REJ of a REQ matches
 * on our comm ID alone; any other REJ matches both comm IDs.
 */
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	__be32 remote_id;

	remote_id = rej_msg->local_comm_id;

	if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
		spin_lock_irqsave(&cm.lock, flags);
		timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
						  remote_id);
		if (!timewait_info) {
			spin_unlock_irqrestore(&cm.lock, flags);
			return NULL;
		}
		/* Map the timewait info back to the local id under cm.lock. */
		cm_id_priv = idr_find(&cm.local_id_table,
				      (__force int) timewait_info->work.local_id);
		if (cm_id_priv) {
			/* Only reference the id if the remote side matches. */
			if (cm_id_priv->id.remote_id == remote_id)
				atomic_inc(&cm_id_priv->refcount);
			else
				cm_id_priv = NULL;
		}
		spin_unlock_irqrestore(&cm.lock, flags);
	} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
	else
		cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);

	return cm_id_priv;
}
2015
/*
 * Handle a received REJ.  Cancels any message we are still retransmitting,
 * then either enters timewait (post-REP states, or any stale-connection
 * REJ) or resets the id to idle, and queues the event for the consumer.
 */
static int cm_rej_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_rej_msg *rej_msg;
	unsigned long flags;
	int ret;

	rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_rejected_id(rej_msg);
	if (!cm_id_priv)
		return -EINVAL;

	cm_format_rej_event(work);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		/* A REQ or REP of ours is retransmitting; stop it. */
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		/* fall through */
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
		/* A stale-connection REJ still requires a timewait period. */
		if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
			cm_enter_timewait(cm_id_priv);
		else
			cm_reset_to_idle(cm_id_priv);
		break;
	case IB_CM_DREQ_SENT:
		ib_cancel_mad(cm_id_priv->av.port->mad_agent,
			      (unsigned long) cm_id_priv->msg);
		/* fall through */
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_ESTABLISHED:
		cm_enter_timewait(cm_id_priv);
		break;
	default:
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		ret = -EINVAL;
		goto out;
	}

	/* Run the event now, or queue it behind a handler in progress. */
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
2075
/*
 * Send an MRA telling the peer that its message was received but the
 * response will be delayed by up to @service_timeout.  The message being
 * MRA'd is inferred from the current state: a received REQ, a received
 * REP, or (when established) a LAP.  The private data and timeout are
 * stored on the id so duplicate-message handling can re-send the MRA.
 * NOTE(review): the IB_CM_ESTABLISHED case does not check lap_state, so
 * an MRA could be sent with no LAP outstanding — confirm intent.
 */
int ib_send_cm_mra(struct ib_cm_id *cm_id,
		   u8 service_timeout,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	void *data;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
		return -EINVAL;

	/* Copy before taking the lock; cm_copy_private_data may allocate. */
	data = cm_copy_private_data(private_data, private_data_len);
	if (IS_ERR(data))
		return PTR_ERR(data);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch(cm_id_priv->id.state) {
	case IB_CM_REQ_RCVD:
		/* Delay our REP to a received REQ. */
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REQ, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				       &msg->send_wr, &bad_send_wr);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REQ_SENT;
		break;
	case IB_CM_REP_RCVD:
		/* Delay our RTU to a received REP. */
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_REP, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				       &msg->send_wr, &bad_send_wr);
		if (ret)
			goto error2;
		cm_id->state = IB_CM_MRA_REP_SENT;
		break;
	case IB_CM_ESTABLISHED:
		/* Delay our APR to a received LAP. */
		ret = cm_alloc_msg(cm_id_priv, &msg);
		if (ret)
			goto error1;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER, service_timeout,
			      private_data, private_data_len);
		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				       &msg->send_wr, &bad_send_wr);
		if (ret)
			goto error2;
		cm_id->lap_state = IB_CM_MRA_LAP_SENT;
		break;
	default:
		ret = -EINVAL;
		goto error1;
	}
	/* Stash timeout and data for re-sending on duplicate messages. */
	cm_id_priv->service_timeout = service_timeout;
	cm_set_private_data(cm_id_priv, data, private_data_len);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return 0;

error1:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	return ret;

error2:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	kfree(data);
	cm_free_msg(msg);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
2160
2161 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2162 {
2163         switch (cm_mra_get_msg_mraed(mra_msg)) {
2164         case CM_MSG_RESPONSE_REQ:
2165                 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2166         case CM_MSG_RESPONSE_REP:
2167         case CM_MSG_RESPONSE_OTHER:
2168                 return cm_acquire_id(mra_msg->remote_comm_id,
2169                                      mra_msg->local_comm_id);
2170         default:
2171                 return NULL;
2172         }
2173 }
2174
/*
 * Handle a received MRA: the peer asked us to wait longer for its
 * response.  Extends the timeout of whichever of our messages (REQ, REP,
 * or LAP, depending on state) is being retransmitted, then queues the
 * event.  An MRA that does not match the current state/outstanding
 * message is dropped.
 */
static int cm_mra_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_mra_msg *mra_msg;
	unsigned long flags;
	int timeout, ret;

	mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
	cm_id_priv = cm_acquire_mraed_id(mra_msg);
	if (!cm_id_priv)
		return -EINVAL;

	work->cm_event.private_data = &mra_msg->private_data;
	work->cm_event.param.mra_rcvd.service_timeout =
					cm_mra_get_service_timeout(mra_msg);
	/* New wait = peer's promised service time + one packet lifetime. */
	timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
		  cm_convert_to_ms(cm_id_priv->av.packet_life_time);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  (unsigned long) cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
		break;
	case IB_CM_REP_SENT:
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  (unsigned long) cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
		break;
	case IB_CM_ESTABLISHED:
		/* Only meaningful while one of our LAPs is outstanding. */
		if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
		    cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
		    ib_modify_mad(cm_id_priv->av.port->mad_agent,
				  (unsigned long) cm_id_priv->msg, timeout))
			goto out;
		cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
		break;
	default:
		goto out;
	}

	/* Keep the outstanding message's context in step with the state. */
	cm_id_priv->msg->context[1] = (void *) (unsigned long)
				      cm_id_priv->id.state;
	/* Run the event now, or queue it behind a handler in progress. */
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
2238
/*
 * Fill in a LAP message proposing @alternate_path as the connection's new
 * alternate path.  Path fields are written from the local node's point of
 * view: our slid/sgid become the LAP's "local" fields, the dlid/dgid the
 * "remote" ones.
 */
static void cm_format_lap(struct cm_lap_msg *lap_msg,
			  struct cm_id_private *cm_id_priv,
			  struct ib_sa_path_rec *alternate_path,
			  const void *private_data,
			  u8 private_data_len)
{
	cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
			  cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
	lap_msg->local_comm_id = cm_id_priv->id.local_id;
	lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
	cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
	/* todo: need remote CM response timeout */
	cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
	lap_msg->alt_local_lid = alternate_path->slid;
	lap_msg->alt_remote_lid = alternate_path->dlid;
	lap_msg->alt_local_gid = alternate_path->sgid;
	lap_msg->alt_remote_gid = alternate_path->dgid;
	cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
	cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
	lap_msg->alt_hop_limit = alternate_path->hop_limit;
	cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
	cm_lap_set_sl(lap_msg, alternate_path->sl);
	cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
	/* Ack timeout is the 5-bit encoded field, clamped to its maximum. */
	cm_lap_set_local_ack_timeout(lap_msg,
		min(31, alternate_path->packet_life_time + 1));

	if (private_data && private_data_len)
		memcpy(lap_msg->private_data, private_data, private_data_len);
}
2268
/*
 * Send a LAP to propose an alternate path for an established connection.
 * Only valid when established with no other LAP in flight
 * (lap_state == IB_CM_LAP_IDLE); on success lap_state becomes
 * IB_CM_LAP_SENT and the message retransmits until answered.
 */
int ib_send_cm_lap(struct ib_cm_id *cm_id,
		   struct ib_sa_path_rec *alternate_path,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    cm_id->lap_state != IB_CM_LAP_IDLE) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
		      alternate_path, private_data, private_data_len);
	/* Retransmit until answered; context[1] records the posted state. */
	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;

	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	cm_id->lap_state = IB_CM_LAP_SENT;
	cm_id_priv->msg = msg;

out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);
2315
2316 static void cm_format_path_from_lap(struct ib_sa_path_rec *path,
2317                                     struct cm_lap_msg *lap_msg)
2318 {
2319         memset(path, 0, sizeof *path);
2320         path->dgid = lap_msg->alt_local_gid;
2321         path->sgid = lap_msg->alt_remote_gid;
2322         path->dlid = lap_msg->alt_local_lid;
2323         path->slid = lap_msg->alt_remote_lid;
2324         path->flow_label = cm_lap_get_flow_label(lap_msg);
2325         path->hop_limit = lap_msg->alt_hop_limit;
2326         path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2327         path->reversible = 1;
2328         /* pkey is same as in REQ */
2329         path->sl = cm_lap_get_sl(lap_msg);
2330         path->mtu_selector = IB_SA_EQ;
2331         /* mtu is same as in REQ */
2332         path->rate_selector = IB_SA_EQ;
2333         path->rate = cm_lap_get_packet_rate(lap_msg);
2334         path->packet_life_time_selector = IB_SA_EQ;
2335         path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2336         path->packet_life_time -= (path->packet_life_time > 0);
2337 }
2338
/*
 * Handle a received LAP message: report the proposed alternate path to
 * the consumer, or re-send a previously generated MRA if one is still
 * outstanding.  Returns 0 if the work item was consumed, -EINVAL if it
 * was dropped.
 */
static int cm_lap_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_lap_msg *lap_msg;
	struct ib_cm_lap_event_param *param;
	struct ib_mad_send_buf *msg = NULL;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	/* todo: verify LAP request and send reject APR if invalid. */
	lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
	/* The sender's remote comm id is our local id, and vice versa. */
	cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
				   lap_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL;

	/* Build the event before taking the lock; work->path[0] is ours. */
	param = &work->cm_event.param.lap_rcvd;
	param->alternate_path = &work->path[0];
	cm_format_path_from_lap(param->alternate_path, lap_msg);
	work->cm_event.private_data = &lap_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
		goto unlock;

	switch (cm_id_priv->id.lap_state) {
	case IB_CM_LAP_IDLE:
		break;
	case IB_CM_MRA_LAP_SENT:
		/*
		 * Duplicate LAP while an MRA is outstanding: answer with
		 * another MRA rather than generating a new event.
		 */
		if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
			goto unlock;

		cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
			      CM_MSG_RESPONSE_OTHER,
			      cm_id_priv->service_timeout,
			      cm_id_priv->private_data,
			      cm_id_priv->private_data_len);
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);

		if (ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				     &msg->send_wr, &bad_send_wr))
			cm_free_msg(msg);
		goto deref;
	default:
		goto unlock;
	}

	cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
	cm_id_priv->tid = lap_msg->hdr.tid;
	/*
	 * Deliver the event now if no other work is active on this id;
	 * otherwise queue it behind the work already in progress.
	 */
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;

unlock:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
deref:	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
2404
2405 static void cm_format_apr(struct cm_apr_msg *apr_msg,
2406                           struct cm_id_private *cm_id_priv,
2407                           enum ib_cm_apr_status status,
2408                           void *info,
2409                           u8 info_length,
2410                           const void *private_data,
2411                           u8 private_data_len)
2412 {
2413         cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2414         apr_msg->local_comm_id = cm_id_priv->id.local_id;
2415         apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2416         apr_msg->ap_status = (u8) status;
2417
2418         if (info && info_length) {
2419                 apr_msg->info_length = info_length;
2420                 memcpy(apr_msg->info, info, info_length);
2421         }
2422
2423         if (private_data && private_data_len)
2424                 memcpy(apr_msg->private_data, private_data, private_data_len);
2425 }
2426
/*
 * ib_send_cm_apr - send an APR (alternate path response) answering a
 * received LAP.
 * @cm_id: connection to respond on; must be IB_CM_ESTABLISHED with a
 *	LAP outstanding (lap_state IB_CM_LAP_RCVD or IB_CM_MRA_LAP_SENT).
 * @status: acceptance/rejection status to report.
 * @info: optional status info, at most IB_CM_APR_INFO_LENGTH bytes.
 * @private_data: optional consumer data, at most
 *	IB_CM_APR_PRIVATE_DATA_SIZE bytes.
 *
 * On success lap_state returns to IB_CM_LAP_IDLE.  Returns 0 or a
 * negative errno.
 */
int ib_send_cm_apr(struct ib_cm_id *cm_id,
		   enum ib_cm_apr_status status,
		   void *info,
		   u8 info_length,
		   const void *private_data,
		   u8 private_data_len)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
	    (info && info_length > IB_CM_APR_INFO_LENGTH))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/* An APR is only valid while a received LAP is being processed. */
	if (cm_id->state != IB_CM_ESTABLISHED ||
	    (cm_id->lap_state != IB_CM_LAP_RCVD &&
	     cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
		ret = -EINVAL;
		goto out;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
		      info, info_length, private_data, private_data_len);
	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}

	/* Sending the APR completes LAP processing on this side. */
	cm_id->lap_state = IB_CM_LAP_IDLE;
out:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);
2472
/*
 * Handle a received APR: cancel the outstanding LAP retransmission,
 * return the id's lap_state to idle, and report the result to the
 * consumer.  Returns 0 if the work item was consumed, -EINVAL if not.
 */
static int cm_apr_handler(struct cm_work *work)
{
	struct cm_id_private *cm_id_priv;
	struct cm_apr_msg *apr_msg;
	unsigned long flags;
	int ret;

	apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
	/* The sender's remote comm id is our local id, and vice versa. */
	cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
				   apr_msg->local_comm_id);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
	work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
	work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
	work->cm_event.private_data = &apr_msg->private_data;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/* Only meaningful if we have a LAP outstanding. */
	if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
	    (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
	     cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
	/* Stop retransmission of the LAP this APR answers. */
	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	cm_id_priv->msg = NULL;

	/*
	 * Deliver the event now if no other work is active on this id;
	 * otherwise queue it behind the work already in progress.
	 */
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
2517
/*
 * Handle expiration of a connection's timewait period: move the id from
 * IB_CM_TIMEWAIT to IB_CM_IDLE and notify the consumer.
 */
static int cm_timewait_handler(struct cm_work *work)
{
	struct cm_timewait_info *timewait_info;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;
	int ret;

	/*
	 * NOTE(review): this cast assumes struct cm_timewait_info embeds
	 * the work item as its first member -- confirm against its
	 * definition.
	 */
	timewait_info = (struct cm_timewait_info *)work;
	cm_cleanup_timewait(timewait_info);

	cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
				   timewait_info->work.remote_id);
	if (!cm_id_priv)
		return -EINVAL;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	/* A changed remote qpn presumably means the id was reused; drop. */
	if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
	    cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	/*
	 * Deliver the event now if no other work is active on this id;
	 * otherwise queue it behind the work already in progress.
	 */
	ret = atomic_inc_and_test(&cm_id_priv->work_count);
	if (!ret)
		list_add_tail(&work->list, &cm_id_priv->work_list);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (ret)
		cm_process_work(cm_id_priv, work);
	else
		cm_deref_id(cm_id_priv);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
2554
2555 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2556                                struct cm_id_private *cm_id_priv,
2557                                struct ib_cm_sidr_req_param *param)
2558 {
2559         cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2560                           cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2561         sidr_req_msg->request_id = cm_id_priv->id.local_id;
2562         sidr_req_msg->pkey = cpu_to_be16(param->pkey);
2563         sidr_req_msg->service_id = param->service_id;
2564
2565         if (param->private_data && param->private_data_len)
2566                 memcpy(sidr_req_msg->private_data, param->private_data,
2567                        param->private_data_len);
2568 }
2569
/*
 * ib_send_cm_sidr_req - send a service ID resolution request.
 * @cm_id: id to send from; must be in IB_CM_IDLE.
 * @param: request parameters; param->path is required and is used to
 *	initialize the address handle for the send.
 *
 * On success the id moves to IB_CM_SIDR_REQ_SENT and the posted MAD is
 * tracked in cm_id_priv->msg.  Returns 0 or a negative errno.
 */
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_req_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if (!param->path || (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
	if (ret)
		goto out;

	cm_id->service_id = param->service_id;
	cm_id->service_mask = __constant_cpu_to_be64(~0ULL);
	cm_id_priv->timeout_ms = param->timeout_ms;
	cm_id_priv->max_cm_retries = param->max_cm_retries;
	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto out;

	cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
			   param);
	msg->send_wr.wr.ud.timeout_ms = cm_id_priv->timeout_ms;
	/* Stash the state for cm_process_send_error() to validate against. */
	msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;

	/* Post only if the id is still idle; check under the lock. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state == IB_CM_IDLE)
		ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
				       &msg->send_wr, &bad_send_wr);
	else
		ret = -EINVAL;

	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		goto out;
	}
	cm_id->state = IB_CM_SIDR_REQ_SENT;
	cm_id_priv->msg = msg;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
2620
2621 static void cm_format_sidr_req_event(struct cm_work *work,
2622                                      struct ib_cm_id *listen_id)
2623 {
2624         struct cm_sidr_req_msg *sidr_req_msg;
2625         struct ib_cm_sidr_req_event_param *param;
2626
2627         sidr_req_msg = (struct cm_sidr_req_msg *)
2628                                 work->mad_recv_wc->recv_buf.mad;
2629         param = &work->cm_event.param.sidr_req_rcvd;
2630         param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
2631         param->listen_id = listen_id;
2632         param->device = work->port->mad_agent->device;
2633         param->port = work->port->port_num;
2634         work->cm_event.private_data = &sidr_req_msg->private_data;
2635 }
2636
/*
 * Handle a received SIDR REQ: create a fresh cm_id for the request,
 * detect duplicates, match the service ID against a listener, and
 * deliver the event through the listener's callback.  Duplicate and
 * unmatched requests are dropped.
 */
static int cm_sidr_req_handler(struct cm_work *work)
{
	struct ib_cm_id *cm_id;
	struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
	struct cm_sidr_req_msg *sidr_req_msg;
	struct ib_wc *wc;
	unsigned long flags;

	cm_id = ib_create_cm_id(NULL, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);
	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	/* Record SGID/SLID and request ID for lookup. */
	sidr_req_msg = (struct cm_sidr_req_msg *)
				work->mad_recv_wc->recv_buf.mad;
	wc = work->mad_recv_wc->wc;
	/*
	 * NOTE(review): the sender's SLID appears to be stored in the
	 * dgid subnet_prefix purely as a duplicate-detection key --
	 * confirm against cm_insert_remote_sidr().
	 */
	cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
	cm_id_priv->av.dgid.global.interface_id = 0;
	cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
				&cm_id_priv->av);
	cm_id_priv->id.remote_id = sidr_req_msg->request_id;
	cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
	cm_id_priv->tid = sidr_req_msg->hdr.tid;
	atomic_inc(&cm_id_priv->work_count);

	spin_lock_irqsave(&cm.lock, flags);
	cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
	if (cur_cm_id_priv) {
		spin_unlock_irqrestore(&cm.lock, flags);
		goto out; /* Duplicate message. */
	}
	cur_cm_id_priv = cm_find_listen(sidr_req_msg->service_id);
	if (!cur_cm_id_priv) {
		rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
		spin_unlock_irqrestore(&cm.lock, flags);
		/* todo: reply with no match */
		goto out; /* No match. */
	}
	/* Hold the listener while its handler runs. */
	atomic_inc(&cur_cm_id_priv->refcount);
	spin_unlock_irqrestore(&cm.lock, flags);

	/* The new id inherits the listener's callback and context. */
	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
	cm_id_priv->id.context = cur_cm_id_priv->id.context;
	cm_id_priv->id.service_id = sidr_req_msg->service_id;
	cm_id_priv->id.service_mask = __constant_cpu_to_be64(~0ULL);

	cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
	cm_process_work(cm_id_priv, work);
	cm_deref_id(cur_cm_id_priv);
	return 0;
out:
	ib_destroy_cm_id(&cm_id_priv->id);
	return -EINVAL;
}
2692
2693 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
2694                                struct cm_id_private *cm_id_priv,
2695                                struct ib_cm_sidr_rep_param *param)
2696 {
2697         cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
2698                           cm_id_priv->tid);
2699         sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
2700         sidr_rep_msg->status = param->status;
2701         cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
2702         sidr_rep_msg->service_id = cm_id_priv->id.service_id;
2703         sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
2704
2705         if (param->info && param->info_length)
2706                 memcpy(sidr_rep_msg->info, param->info, param->info_length);
2707
2708         if (param->private_data && param->private_data_len)
2709                 memcpy(sidr_rep_msg->private_data, param->private_data,
2710                        param->private_data_len);
2711 }
2712
/*
 * ib_send_cm_sidr_rep - send a reply to a received SIDR REQ.
 * @cm_id: id created for the request; must be in IB_CM_SIDR_REQ_RCVD.
 * @param: reply parameters; info is bounded by IB_CM_SIDR_REP_INFO_LENGTH
 *	and private data by IB_CM_SIDR_REP_PRIVATE_DATA_SIZE.
 *
 * On success the id returns to IB_CM_IDLE and is removed from the
 * remote SIDR table.  Returns 0 or a negative errno.
 */
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
			struct ib_cm_sidr_rep_param *param)
{
	struct cm_id_private *cm_id_priv;
	struct ib_mad_send_buf *msg;
	struct ib_send_wr *bad_send_wr;
	unsigned long flags;
	int ret;

	if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
	    (param->private_data &&
	     param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
		return -EINVAL;

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
		ret = -EINVAL;
		goto error;
	}

	ret = cm_alloc_msg(cm_id_priv, &msg);
	if (ret)
		goto error;

	cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
			   param);
	ret = ib_post_send_mad(cm_id_priv->av.port->mad_agent,
			       &msg->send_wr, &bad_send_wr);
	if (ret) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		cm_free_msg(msg);
		return ret;
	}
	cm_id->state = IB_CM_IDLE;
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	/* The reply is out; stop tracking the request for duplicates. */
	spin_lock_irqsave(&cm.lock, flags);
	rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	spin_unlock_irqrestore(&cm.lock, flags);
	return 0;

error:	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
2759
2760 static void cm_format_sidr_rep_event(struct cm_work *work)
2761 {
2762         struct cm_sidr_rep_msg *sidr_rep_msg;
2763         struct ib_cm_sidr_rep_event_param *param;
2764
2765         sidr_rep_msg = (struct cm_sidr_rep_msg *)
2766                                 work->mad_recv_wc->recv_buf.mad;
2767         param = &work->cm_event.param.sidr_rep_rcvd;
2768         param->status = sidr_rep_msg->status;
2769         param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
2770         param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
2771         param->info = &sidr_rep_msg->info;
2772         param->info_len = sidr_rep_msg->info_length;
2773         work->cm_event.private_data = &sidr_rep_msg->private_data;
2774 }
2775
/*
 * Handle a received SIDR REP: cancel the outstanding SIDR REQ, return
 * the id to IB_CM_IDLE, and report the resolution result.  Returns 0
 * if the work item was consumed, -EINVAL if it was dropped.
 */
static int cm_sidr_rep_handler(struct cm_work *work)
{
	struct cm_sidr_rep_msg *sidr_rep_msg;
	struct cm_id_private *cm_id_priv;
	unsigned long flags;

	sidr_rep_msg = (struct cm_sidr_rep_msg *)
				work->mad_recv_wc->recv_buf.mad;
	/* SIDR ids are looked up by request id alone (remote id 0). */
	cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
	if (!cm_id_priv)
		return -EINVAL; /* Unmatched reply. */

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
		goto out;
	}
	cm_id_priv->id.state = IB_CM_IDLE;
	/* Stop retransmission of the SIDR REQ this reply answers. */
	ib_cancel_mad(cm_id_priv->av.port->mad_agent,
		      (unsigned long) cm_id_priv->msg);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	cm_format_sidr_rep_event(work);
	cm_process_work(cm_id_priv, work);
	return 0;
out:
	cm_deref_id(cm_id_priv);
	return -EINVAL;
}
2805
/*
 * Turn a MAD send that completed in error into the matching consumer
 * event (REQ/REP/DREQ/SIDR_REQ error).  The failure is only reported if
 * the buffer is still the id's current message and the id is still in
 * the state recorded in msg->context[1] when the send was posted;
 * stale sends are silently discarded.
 */
static void cm_process_send_error(struct ib_mad_send_buf *msg,
				  enum ib_wc_status wc_status)
{
	struct cm_id_private *cm_id_priv;
	struct ib_cm_event cm_event;
	enum ib_cm_state state;
	unsigned long flags;
	int ret;

	memset(&cm_event, 0, sizeof cm_event);
	cm_id_priv = msg->context[0];

	/* Discard old sends or ones without a response. */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	state = (enum ib_cm_state) (unsigned long) msg->context[1];
	if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
		goto discard;

	switch (state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REQ_ERROR;
		break;
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
		cm_reset_to_idle(cm_id_priv);
		cm_event.event = IB_CM_REP_ERROR;
		break;
	case IB_CM_DREQ_SENT:
		/* A failed DREQ still moves the connection to timewait. */
		cm_enter_timewait(cm_id_priv);
		cm_event.event = IB_CM_DREQ_ERROR;
		break;
	case IB_CM_SIDR_REQ_SENT:
		cm_id_priv->id.state = IB_CM_IDLE;
		cm_event.event = IB_CM_SIDR_REQ_ERROR;
		break;
	default:
		goto discard;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_event.param.send_status = wc_status;

	/* No other events can occur on the cm_id at this point. */
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
	cm_free_msg(msg);
	if (ret)
		ib_destroy_cm_id(&cm_id_priv->id);
	return;
discard:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	cm_free_msg(msg);
}
2859
2860 static void cm_send_handler(struct ib_mad_agent *mad_agent,
2861                             struct ib_mad_send_wc *mad_send_wc)
2862 {
2863         struct ib_mad_send_buf *msg;
2864
2865         msg = (struct ib_mad_send_buf *)(unsigned long)mad_send_wc->wr_id;
2866
2867         switch (mad_send_wc->status) {
2868         case IB_WC_SUCCESS:
2869         case IB_WC_WR_FLUSH_ERR:
2870                 cm_free_msg(msg);
2871                 break;
2872         default:
2873                 if (msg->context[0] && msg->context[1])
2874                         cm_process_send_error(msg, mad_send_wc->status);
2875                 else
2876                         cm_free_msg(msg);
2877                 break;
2878         }
2879 }
2880
2881 static void cm_work_handler(void *data)
2882 {
2883         struct cm_work *work = data;
2884         int ret;
2885
2886         switch (work->cm_event.event) {
2887         case IB_CM_REQ_RECEIVED:
2888                 ret = cm_req_handler(work);
2889                 break;
2890         case IB_CM_MRA_RECEIVED:
2891                 ret = cm_mra_handler(work);
2892                 break;
2893         case IB_CM_REJ_RECEIVED:
2894                 ret = cm_rej_handler(work);
2895                 break;
2896         case IB_CM_REP_RECEIVED:
2897                 ret = cm_rep_handler(work);
2898                 break;
2899         case IB_CM_RTU_RECEIVED:
2900                 ret = cm_rtu_handler(work);
2901                 break;
2902         case IB_CM_USER_ESTABLISHED:
2903                 ret = cm_establish_handler(work);
2904                 break;
2905         case IB_CM_DREQ_RECEIVED:
2906                 ret = cm_dreq_handler(work);
2907                 break;
2908         case IB_CM_DREP_RECEIVED:
2909                 ret = cm_drep_handler(work);
2910                 break;
2911         case IB_CM_SIDR_REQ_RECEIVED:
2912                 ret = cm_sidr_req_handler(work);
2913                 break;
2914         case IB_CM_SIDR_REP_RECEIVED:
2915                 ret = cm_sidr_rep_handler(work);
2916                 break;
2917         case IB_CM_LAP_RECEIVED:
2918                 ret = cm_lap_handler(work);
2919                 break;
2920         case IB_CM_APR_RECEIVED:
2921                 ret = cm_apr_handler(work);
2922                 break;
2923         case IB_CM_TIMEWAIT_EXIT:
2924                 ret = cm_timewait_handler(work);
2925                 break;
2926         default:
2927                 ret = -EINVAL;
2928                 break;
2929         }
2930         if (ret)
2931                 cm_free_work(work);
2932 }
2933
2934 int ib_cm_establish(struct ib_cm_id *cm_id)
2935 {
2936         struct cm_id_private *cm_id_priv;
2937         struct cm_work *work;
2938         unsigned long flags;
2939         int ret = 0;
2940
2941         work = kmalloc(sizeof *work, GFP_ATOMIC);
2942         if (!work)
2943                 return -ENOMEM;
2944
2945         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2946         spin_lock_irqsave(&cm_id_priv->lock, flags);
2947         switch (cm_id->state)
2948         {
2949         case IB_CM_REP_SENT:
2950         case IB_CM_MRA_REP_RCVD:
2951                 cm_id->state = IB_CM_ESTABLISHED;
2952                 break;
2953         case IB_CM_ESTABLISHED:
2954                 ret = -EISCONN;
2955                 break;
2956         default:
2957                 ret = -EINVAL;
2958                 break;
2959         }
2960         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2961
2962         if (ret) {
2963                 kfree(work);
2964                 goto out;
2965         }
2966
2967         /*
2968          * The CM worker thread may try to destroy the cm_id before it
2969          * can execute this work item.  To prevent potential deadlock,
2970          * we need to find the cm_id once we're in the context of the
2971          * worker thread, rather than holding a reference on it.
2972          */
2973         INIT_WORK(&work->work, cm_work_handler, work);
2974         work->local_id = cm_id->local_id;
2975         work->remote_id = cm_id->remote_id;
2976         work->mad_recv_wc = NULL;
2977         work->cm_event.event = IB_CM_USER_ESTABLISHED;
2978         queue_work(cm.wq, &work->work);
2979 out:
2980         return ret;
2981 }
2982 EXPORT_SYMBOL(ib_cm_establish);
2983
/*
 * MAD agent receive callback: map the incoming CM attribute ID to the
 * corresponding event, allocate a work item with room for any path
 * records the handler will build, and queue it to the CM workqueue.
 */
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
			    struct ib_mad_recv_wc *mad_recv_wc)
{
	struct cm_work *work;
	enum ib_cm_event_type event;
	int paths = 0;

	switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
	case CM_REQ_ATTR_ID:
		/* A REQ carries one path, plus an alternate if present. */
		paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
						    alt_local_lid != 0);
		event = IB_CM_REQ_RECEIVED;
		break;
	case CM_MRA_ATTR_ID:
		event = IB_CM_MRA_RECEIVED;
		break;
	case CM_REJ_ATTR_ID:
		event = IB_CM_REJ_RECEIVED;
		break;
	case CM_REP_ATTR_ID:
		event = IB_CM_REP_RECEIVED;
		break;
	case CM_RTU_ATTR_ID:
		event = IB_CM_RTU_RECEIVED;
		break;
	case CM_DREQ_ATTR_ID:
		event = IB_CM_DREQ_RECEIVED;
		break;
	case CM_DREP_ATTR_ID:
		event = IB_CM_DREP_RECEIVED;
		break;
	case CM_SIDR_REQ_ATTR_ID:
		event = IB_CM_SIDR_REQ_RECEIVED;
		break;
	case CM_SIDR_REP_ATTR_ID:
		event = IB_CM_SIDR_REP_RECEIVED;
		break;
	case CM_LAP_ATTR_ID:
		/* A LAP carries exactly one (alternate) path. */
		paths = 1;
		event = IB_CM_LAP_RECEIVED;
		break;
	case CM_APR_ATTR_ID:
		event = IB_CM_APR_RECEIVED;
		break;
	default:
		/* Not a CM attribute we handle; drop it. */
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
		       GFP_KERNEL);
	if (!work) {
		ib_free_recv_mad(mad_recv_wc);
		return;
	}

	INIT_WORK(&work->work, cm_work_handler, work);
	work->cm_event.event = event;
	work->mad_recv_wc = mad_recv_wc;
	work->port = (struct cm_port *)mad_agent->context;
	queue_work(cm.wq, &work->work);
}
3046
/*
 * Fill in the QP attributes needed to move a QP to the INIT state for
 * this connection.  Valid once connection establishment has begun;
 * returns 0 on success or -EINVAL for an inappropriate state.
 */
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
				struct ib_qp_attr *qp_attr,
				int *qp_attr_mask)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	switch (cm_id_priv->id.state) {
	case IB_CM_REQ_SENT:
	case IB_CM_MRA_REQ_RCVD:
	case IB_CM_REQ_RCVD:
	case IB_CM_MRA_REQ_SENT:
	case IB_CM_REP_RCVD:
	case IB_CM_MRA_REP_SENT:
	case IB_CM_REP_SENT:
	case IB_CM_MRA_REP_RCVD:
	case IB_CM_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
				IB_QP_PKEY_INDEX | IB_QP_PORT;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
		/* Grant remote access only if responder resources exist. */
		if (cm_id_priv->responder_resources)
			qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_WRITE |
						    IB_ACCESS_REMOTE_READ;
		qp_attr->pkey_index = cm_id_priv->av.pkey_index;
		qp_attr->port_num = cm_id_priv->av.port->port_num;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}
3082
3083 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3084                                struct ib_qp_attr *qp_attr,
3085                                int *qp_attr_mask)
3086 {
3087         unsigned long flags;
3088         int ret;
3089
3090         spin_lock_irqsave(&cm_id_priv->lock, flags);
3091         switch (cm_id_priv->id.state) {
3092         case IB_CM_REQ_RCVD:
3093         case IB_CM_MRA_REQ_SENT:
3094         case IB_CM_REP_RCVD:
3095         case IB_CM_MRA_REP_SENT:
3096         case IB_CM_REP_SENT:
3097         case IB_CM_MRA_REP_RCVD:
3098         case IB_CM_ESTABLISHED:
3099                 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3100                                 IB_QP_DEST_QPN | IB_QP_RQ_PSN |
3101                                 IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
3102                 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3103                 qp_attr->path_mtu = cm_id_priv->path_mtu;
3104                 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3105                 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3106                 qp_attr->max_dest_rd_atomic = cm_id_priv->responder_resources;
3107                 qp_attr->min_rnr_timer = 0;
3108                 if (cm_id_priv->alt_av.ah_attr.dlid) {
3109                         *qp_attr_mask |= IB_QP_ALT_PATH;
3110                         qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3111                 }
3112                 ret = 0;
3113                 break;
3114         default:
3115                 ret = -EINVAL;
3116                 break;
3117         }
3118         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3119         return ret;
3120 }
3121
3122 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3123                                struct ib_qp_attr *qp_attr,
3124                                int *qp_attr_mask)
3125 {
3126         unsigned long flags;
3127         int ret;
3128
3129         spin_lock_irqsave(&cm_id_priv->lock, flags);
3130         switch (cm_id_priv->id.state) {
3131         case IB_CM_REP_RCVD:
3132         case IB_CM_MRA_REP_SENT:
3133         case IB_CM_REP_SENT:
3134         case IB_CM_MRA_REP_RCVD:
3135         case IB_CM_ESTABLISHED:
3136                 *qp_attr_mask = IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
3137                                 IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
3138                                 IB_QP_MAX_QP_RD_ATOMIC;
3139                 qp_attr->timeout = cm_id_priv->local_ack_timeout;
3140                 qp_attr->retry_cnt = cm_id_priv->retry_count;
3141                 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3142                 qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3143                 qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
3144                 if (cm_id_priv->alt_av.ah_attr.dlid) {
3145                         *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3146                         qp_attr->path_mig_state = IB_MIG_REARM;
3147                 }
3148                 ret = 0;
3149                 break;
3150         default:
3151                 ret = -EINVAL;
3152                 break;
3153         }
3154         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3155         return ret;
3156 }
3157
3158 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3159                        struct ib_qp_attr *qp_attr,
3160                        int *qp_attr_mask)
3161 {
3162         struct cm_id_private *cm_id_priv;
3163         int ret;
3164
3165         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3166         switch (qp_attr->qp_state) {
3167         case IB_QPS_INIT:
3168                 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3169                 break;
3170         case IB_QPS_RTR:
3171                 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3172                 break;
3173         case IB_QPS_RTS:
3174                 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3175                 break;
3176         default:
3177                 ret = -EINVAL;
3178                 break;
3179         }
3180         return ret;
3181 }
3182 EXPORT_SYMBOL(ib_cm_init_qp_attr);
3183
3184 static __be64 cm_get_ca_guid(struct ib_device *device)
3185 {
3186         struct ib_device_attr *device_attr;
3187         __be64 guid;
3188         int ret;
3189
3190         device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
3191         if (!device_attr)
3192                 return 0;
3193
3194         ret = ib_query_device(device, device_attr);
3195         guid = ret ? 0 : device_attr->node_guid;
3196         kfree(device_attr);
3197         return guid;
3198 }
3199
/*
 * Per-device setup, invoked by the IB core for each HCA.  Allocates a
 * cm_device with one cm_port per physical port, registers a GSI MAD
 * agent for the CM management class on every port, and sets the CM
 * capability bit in each port's capability mask.  This callback cannot
 * report errors, so on any failure the device is simply skipped after
 * unwinding whatever was set up.
 */
static void cm_add_one(struct ib_device *device)
{
	struct cm_device *cm_dev;
	struct cm_port *port;
	struct ib_mad_reg_req reg_req = {
		.mgmt_class = IB_MGMT_CLASS_CM,
		.mgmt_class_version = IB_CM_CLASS_VERSION
	};
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP
	};
	unsigned long flags;
	int ret;
	u8 i;

	/* The cm_port array lives inline right after the cm_device. */
	cm_dev = kmalloc(sizeof(*cm_dev) + sizeof(*port) *
			 device->phys_port_cnt, GFP_KERNEL);
	if (!cm_dev)
		return;

	cm_dev->device = device;
	cm_dev->ca_guid = cm_get_ca_guid(device);
	if (!cm_dev->ca_guid)
		goto error1;

	/* Accept unsolicited CM MADs whose method is Send. */
	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
	/* IB port numbers are 1-based; the port array is 0-based. */
	for (i = 1; i <= device->phys_port_cnt; i++) {
		port = &cm_dev->port[i-1];
		port->cm_dev = cm_dev;
		port->port_num = i;
		port->mad_agent = ib_register_mad_agent(device, i,
							IB_QPT_GSI,
							&reg_req,
							0,
							cm_send_handler,
							cm_recv_handler,
							port);
		if (IS_ERR(port->mad_agent))
			goto error2;

		ret = ib_modify_port(device, i, 0, &port_modify);
		if (ret)
			goto error3;
	}
	ib_set_client_data(device, &cm_client, cm_dev);

	write_lock_irqsave(&cm.device_lock, flags);
	list_add_tail(&cm_dev->list, &cm.device_list);
	write_unlock_irqrestore(&cm.device_lock, flags);
	return;

/* Port i registered an agent but never got its capability bit set. */
error3:
	ib_unregister_mad_agent(port->mad_agent);
/* Unwind ports 1..i-1, which are fully initialized: clear the CM
 * capability bit and drop the MAD agent on each. */
error2:
	port_modify.set_port_cap_mask = 0;
	port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
	while (--i) {
		port = &cm_dev->port[i-1];
		ib_modify_port(device, port->port_num, 0, &port_modify);
		ib_unregister_mad_agent(port->mad_agent);
	}
error1:
	kfree(cm_dev);
}
3264
3265 static void cm_remove_one(struct ib_device *device)
3266 {
3267         struct cm_device *cm_dev;
3268         struct cm_port *port;
3269         struct ib_port_modify port_modify = {
3270                 .clr_port_cap_mask = IB_PORT_CM_SUP
3271         };
3272         unsigned long flags;
3273         int i;
3274
3275         cm_dev = ib_get_client_data(device, &cm_client);
3276         if (!cm_dev)
3277                 return;
3278
3279         write_lock_irqsave(&cm.device_lock, flags);
3280         list_del(&cm_dev->list);
3281         write_unlock_irqrestore(&cm.device_lock, flags);
3282
3283         for (i = 1; i <= device->phys_port_cnt; i++) {
3284                 port = &cm_dev->port[i-1];
3285                 ib_modify_port(device, port->port_num, 0, &port_modify);
3286                 ib_unregister_mad_agent(port->mad_agent);
3287         }
3288         kfree(cm_dev);
3289 }
3290
3291 static int __init ib_cm_init(void)
3292 {
3293         int ret;
3294
3295         memset(&cm, 0, sizeof cm);
3296         INIT_LIST_HEAD(&cm.device_list);
3297         rwlock_init(&cm.device_lock);
3298         spin_lock_init(&cm.lock);
3299         cm.listen_service_table = RB_ROOT;
3300         cm.listen_service_id = __constant_be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3301         cm.remote_id_table = RB_ROOT;
3302         cm.remote_qp_table = RB_ROOT;
3303         cm.remote_sidr_table = RB_ROOT;
3304         idr_init(&cm.local_id_table);
3305         idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3306
3307         cm.wq = create_workqueue("ib_cm");
3308         if (!cm.wq)
3309                 return -ENOMEM;
3310
3311         ret = ib_register_client(&cm_client);
3312         if (ret)
3313                 goto error;
3314
3315         return 0;
3316 error:
3317         destroy_workqueue(cm.wq);
3318         return ret;
3319 }
3320
3321 static void __exit ib_cm_cleanup(void)
3322 {
3323         flush_workqueue(cm.wq);
3324         destroy_workqueue(cm.wq);
3325         ib_unregister_client(&cm_client);
3326 }
3327
/* Register module entry and exit points with the kernel. */
module_init(ib_cm_init);
module_exit(ib_cm_cleanup);
3330