/*
 * Copyright (c) 2004-2007 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

static struct ib_client cm_client = {
        .name   = "cm",
        .add    = cm_add_one,
        .remove = cm_remove_one
};

static struct ib_cm {
        spinlock_t lock;
        struct list_head device_list;
        rwlock_t device_lock;
        struct rb_root listen_service_table;
        u64 listen_service_id;
        /* struct rb_root peer_service_table; todo: fix peer to peer */
        struct rb_root remote_qp_table;
        struct rb_root remote_id_table;
        struct rb_root remote_sidr_table;
        struct idr local_id_table;
        __be32 random_id_operand;
        struct list_head timewait_list;
        struct workqueue_struct *wq;
} cm;

/* Counter indexes ordered by attribute ID */
enum {
        CM_REQ_COUNTER,
        CM_MRA_COUNTER,
        CM_REJ_COUNTER,
        CM_REP_COUNTER,
        CM_RTU_COUNTER,
        CM_DREQ_COUNTER,
        CM_DREP_COUNTER,
        CM_SIDR_REQ_COUNTER,
        CM_SIDR_REP_COUNTER,
        CM_LAP_COUNTER,
        CM_APR_COUNTER,
        CM_ATTR_COUNT,
        CM_ATTR_ID_OFFSET = 0x0010,
};

enum {
        CM_XMIT,
        CM_XMIT_RETRIES,
        CM_RECV,
        CM_RECV_DUPLICATES,
        CM_COUNTER_GROUPS
};

static char const counter_group_names[CM_COUNTER_GROUPS]
                                     [sizeof("cm_rx_duplicates")] = {
        "cm_tx_msgs", "cm_tx_retries",
        "cm_rx_msgs", "cm_rx_duplicates"
};

struct cm_counter_group {
        struct kobject obj;
        atomic_long_t counter[CM_ATTR_COUNT];
};

struct cm_counter_attribute {
        struct attribute attr;
        int index;
};

#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
        .attr = { .name = __stringify(_name), .mode = 0444 }, \
        .index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

static struct attribute *cm_counter_default_attrs[] = {
        &cm_req_counter_attr.attr,
        &cm_mra_counter_attr.attr,
        &cm_rej_counter_attr.attr,
        &cm_rep_counter_attr.attr,
        &cm_rtu_counter_attr.attr,
        &cm_dreq_counter_attr.attr,
        &cm_drep_counter_attr.attr,
        &cm_sidr_req_counter_attr.attr,
        &cm_sidr_rep_counter_attr.attr,
        &cm_lap_counter_attr.attr,
        &cm_apr_counter_attr.attr,
        NULL
};
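
/*
 * Each cm_counter_group becomes one sysfs directory per port, with one
 * file per CM_COUNTER_ATTR above.  Assuming the "infiniband_cm" class
 * that this module registers later in the file (outside this excerpt),
 * the REQ transmit counter for port 1 of a device would appear roughly
 * as:
 *
 *      /sys/class/infiniband_cm/<ibdev>/1/cm_tx_msgs/req
 *
 * The device name and exact layout are illustrative.
 */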

struct cm_port {
        struct cm_device *cm_dev;
        struct ib_mad_agent *mad_agent;
        struct kobject port_obj;
        u8 port_num;
        struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

struct cm_device {
        struct list_head list;
        struct ib_device *ib_device;
        struct device *device;
        u8 ack_delay;
        struct cm_port *port[0];
};

struct cm_av {
        struct cm_port *port;
        union ib_gid dgid;
        struct ib_ah_attr ah_attr;
        u16 pkey_index;
        u8 timeout;
};

struct cm_work {
        struct delayed_work work;
        struct list_head list;
        struct cm_port *port;
        struct ib_mad_recv_wc *mad_recv_wc;     /* Received MADs */
        __be32 local_id;                        /* Established / timewait */
        __be32 remote_id;
        struct ib_cm_event cm_event;
        struct ib_sa_path_rec path[0];
};

struct cm_timewait_info {
        struct cm_work work;                    /* Must be first. */
        struct list_head list;
        struct rb_node remote_qp_node;
        struct rb_node remote_id_node;
        __be64 remote_ca_guid;
        __be32 remote_qpn;
        u8 inserted_remote_qp;
        u8 inserted_remote_id;
};

struct cm_id_private {
        struct ib_cm_id id;

        struct rb_node service_node;
        struct rb_node sidr_id_node;
        spinlock_t lock;        /* Do not acquire inside cm.lock */
        struct completion comp;
        atomic_t refcount;

        struct ib_mad_send_buf *msg;
        struct cm_timewait_info *timewait_info;
        /* todo: use alternate port on send failure */
        struct cm_av av;
        struct cm_av alt_av;
        struct ib_cm_compare_data *compare_data;

        void *private_data;
        __be64 tid;
        __be32 local_qpn;
        __be32 remote_qpn;
        enum ib_qp_type qp_type;
        __be32 sq_psn;
        __be32 rq_psn;
        int timeout_ms;
        enum ib_mtu path_mtu;
        __be16 pkey;
        u8 private_data_len;
        u8 max_cm_retries;
        u8 peer_to_peer;
        u8 responder_resources;
        u8 initiator_depth;
        u8 retry_count;
        u8 rnr_retry_count;
        u8 service_timeout;
        u8 target_ack_delay;

        struct list_head work_list;
        atomic_t work_count;
};

static void cm_work_handler(struct work_struct *work);

static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
        if (atomic_dec_and_test(&cm_id_priv->refcount))
                complete(&cm_id_priv->comp);
}

static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
                        struct ib_mad_send_buf **msg)
{
        struct ib_mad_agent *mad_agent;
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        mad_agent = cm_id_priv->av.port->mad_agent;
        ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
                               cm_id_priv->av.pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }

        /* Timeout set by caller if response is expected. */
        m->ah = ah;
        m->retries = cm_id_priv->max_cm_retries;

        atomic_inc(&cm_id_priv->refcount);
        m->context[0] = cm_id_priv;
        *msg = m;
        return 0;
}

static int cm_alloc_response_msg(struct cm_port *port,
                                 struct ib_mad_recv_wc *mad_recv_wc,
                                 struct ib_mad_send_buf **msg)
{
        struct ib_mad_send_buf *m;
        struct ib_ah *ah;

        ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
                                  mad_recv_wc->recv_buf.grh, port->port_num);
        if (IS_ERR(ah))
                return PTR_ERR(ah);

        m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
                               0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
                               GFP_ATOMIC);
        if (IS_ERR(m)) {
                ib_destroy_ah(ah);
                return PTR_ERR(m);
        }
        m->ah = ah;
        *msg = m;
        return 0;
}

static void cm_free_msg(struct ib_mad_send_buf *msg)
{
        ib_destroy_ah(msg->ah);
        if (msg->context[0])
                cm_deref_id(msg->context[0]);
        ib_free_send_mad(msg);
}

static void * cm_copy_private_data(const void *private_data,
                                   u8 private_data_len)
{
        void *data;

        if (!private_data || !private_data_len)
                return NULL;

        data = kmemdup(private_data, private_data_len, GFP_KERNEL);
        if (!data)
                return ERR_PTR(-ENOMEM);

        return data;
}

static void cm_set_private_data(struct cm_id_private *cm_id_priv,
                                 void *private_data, u8 private_data_len)
{
        if (cm_id_priv->private_data && cm_id_priv->private_data_len)
                kfree(cm_id_priv->private_data);

        cm_id_priv->private_data = private_data;
        cm_id_priv->private_data_len = private_data_len;
}

static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
                                    struct ib_grh *grh, struct cm_av *av)
{
        av->port = port;
        av->pkey_index = wc->pkey_index;
        ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
                           grh, &av->ah_attr);
}

static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
        struct cm_device *cm_dev;
        struct cm_port *port = NULL;
        unsigned long flags;
        int ret;
        u8 p;

        read_lock_irqsave(&cm.device_lock, flags);
        list_for_each_entry(cm_dev, &cm.device_list, list) {
                if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
                                        &p, NULL)) {
                        port = cm_dev->port[p-1];
                        break;
                }
        }
        read_unlock_irqrestore(&cm.device_lock, flags);

        if (!port)
                return -EINVAL;

        ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
                                  be16_to_cpu(path->pkey), &av->pkey_index);
        if (ret)
                return ret;

        av->port = port;
        ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
                             &av->ah_attr);
        av->timeout = path->packet_life_time + 1;
        return 0;
}

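/*
 * Local IDs come from an idr.  The retry pattern below exists because
 * idr_get_new_above() can fail with -EAGAIN once its preallocated
 * nodes run out, and idr_pre_get() (which may sleep with GFP_KERNEL)
 * must be called outside cm.lock to refill them before retrying.  The
 * allocated id is then XORed with a boot-time random operand so that
 * local IDs on the wire are hard to guess.
 */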
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;
        int ret, id;
        static int next_id;

        do {
                spin_lock_irqsave(&cm.lock, flags);
                ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
                                        next_id, &id);
                if (!ret)
                        next_id = ((unsigned) id + 1) & MAX_ID_MASK;
                spin_unlock_irqrestore(&cm.lock, flags);
        } while ((ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL));

        cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
        return ret;
}

static void cm_free_id(__be32 local_id)
{
        spin_lock_irq(&cm.lock);
        idr_remove(&cm.local_id_table,
                   (__force int) (local_id ^ cm.random_id_operand));
        spin_unlock_irq(&cm.lock);
}

static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        cm_id_priv = idr_find(&cm.local_id_table,
                              (__force int) (local_id ^ cm.random_id_operand));
        if (cm_id_priv) {
                if (cm_id_priv->id.remote_id == remote_id)
                        atomic_inc(&cm_id_priv->refcount);
                else
                        cm_id_priv = NULL;
        }

        return cm_id_priv;
}

static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
        struct cm_id_private *cm_id_priv;

        spin_lock_irq(&cm.lock);
        cm_id_priv = cm_get_id(local_id, remote_id);
        spin_unlock_irq(&cm.lock);

        return cm_id_priv;
}

static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
        int i;

        for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
                ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
                                             ((unsigned long *) mask)[i];
}
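
/*
 * For example, with byte-wide values for brevity (the loop actually
 * works a word at a time): src = {0x12, 0x34}, mask = {0xff, 0x00}
 * yields dst = {0x12, 0x00}.  Masking both sides with the listener's
 * mask is what lets cm_compare_data() below ignore "don't care" bytes.
 */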

static int cm_compare_data(struct ib_cm_compare_data *src_data,
                           struct ib_cm_compare_data *dst_data)
{
        u8 src[IB_CM_COMPARE_SIZE];
        u8 dst[IB_CM_COMPARE_SIZE];

        if (!src_data || !dst_data)
                return 0;

        cm_mask_copy(src, src_data->data, dst_data->mask);
        cm_mask_copy(dst, dst_data->data, src_data->mask);
        return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}

static int cm_compare_private_data(u8 *private_data,
                                   struct ib_cm_compare_data *dst_data)
{
        u8 src[IB_CM_COMPARE_SIZE];

        if (!dst_data)
                return 0;

        cm_mask_copy(src, private_data, dst_data->mask);
        return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}

/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
        return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
        return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
        return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
        return (__force u64) a > (__force u64) b;
}

static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
        struct rb_node **link = &cm.listen_service_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        __be64 service_id = cm_id_priv->id.service_id;
        __be64 service_mask = cm_id_priv->id.service_mask;
        int data_cmp;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          service_node);
                data_cmp = cm_compare_data(cm_id_priv->compare_data,
                                           cur_cm_id_priv->compare_data);
                if ((cur_cm_id_priv->id.service_mask & service_id) ==
                    (service_mask & cur_cm_id_priv->id.service_id) &&
                    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
                    !data_cmp)
                        return cur_cm_id_priv;

                if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
                        link = &(*link)->rb_left;
                else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
                        link = &(*link)->rb_right;
                else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
                        link = &(*link)->rb_left;
                else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
                        link = &(*link)->rb_right;
                else if (data_cmp < 0)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&cm_id_priv->service_node, parent, link);
        rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
        return NULL;
}

static struct cm_id_private * cm_find_listen(struct ib_device *device,
                                             __be64 service_id,
                                             u8 *private_data)
{
        struct rb_node *node = cm.listen_service_table.rb_node;
        struct cm_id_private *cm_id_priv;
        int data_cmp;

        while (node) {
                cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
                data_cmp = cm_compare_private_data(private_data,
                                                   cm_id_priv->compare_data);
                if ((cm_id_priv->id.service_mask & service_id) ==
                     cm_id_priv->id.service_id &&
                    (cm_id_priv->id.device == device) && !data_cmp)
                        return cm_id_priv;

                if (device < cm_id_priv->id.device)
                        node = node->rb_left;
                else if (device > cm_id_priv->id.device)
                        node = node->rb_right;
                else if (be64_lt(service_id, cm_id_priv->id.service_id))
                        node = node->rb_left;
                else if (be64_gt(service_id, cm_id_priv->id.service_id))
                        node = node->rb_right;
                else if (data_cmp < 0)
                        node = node->rb_left;
                else
                        node = node->rb_right;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
                                                     *timewait_info)
{
        struct rb_node **link = &cm.remote_id_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_id = timewait_info->work.remote_id;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_id_node);
                if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
                        link = &(*link)->rb_right;
                else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_left;
                else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_id = 1;
        rb_link_node(&timewait_info->remote_id_node, parent, link);
        rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
        return NULL;
}

static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
                                                   __be32 remote_id)
{
        struct rb_node *node = cm.remote_id_table.rb_node;
        struct cm_timewait_info *timewait_info;

        while (node) {
                timewait_info = rb_entry(node, struct cm_timewait_info,
                                         remote_id_node);
                if (be32_lt(remote_id, timewait_info->work.remote_id))
                        node = node->rb_left;
                else if (be32_gt(remote_id, timewait_info->work.remote_id))
                        node = node->rb_right;
                else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
                        node = node->rb_left;
                else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
                        node = node->rb_right;
                else
                        return timewait_info;
        }
        return NULL;
}

static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
                                                      *timewait_info)
{
        struct rb_node **link = &cm.remote_qp_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_timewait_info *cur_timewait_info;
        __be64 remote_ca_guid = timewait_info->remote_ca_guid;
        __be32 remote_qpn = timewait_info->remote_qpn;

        while (*link) {
                parent = *link;
                cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
                                             remote_qp_node);
                if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
                        link = &(*link)->rb_right;
                else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_left;
                else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
                        link = &(*link)->rb_right;
                else
                        return cur_timewait_info;
        }
        timewait_info->inserted_remote_qp = 1;
        rb_link_node(&timewait_info->remote_qp_node, parent, link);
        rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
        return NULL;
}

static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
                                                    *cm_id_priv)
{
        struct rb_node **link = &cm.remote_sidr_table.rb_node;
        struct rb_node *parent = NULL;
        struct cm_id_private *cur_cm_id_priv;
        union ib_gid *port_gid = &cm_id_priv->av.dgid;
        __be32 remote_id = cm_id_priv->id.remote_id;

        while (*link) {
                parent = *link;
                cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
                                          sidr_id_node);
                if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
                        link = &(*link)->rb_left;
                else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
                        link = &(*link)->rb_right;
                else {
                        int cmp;
                        cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
                                     sizeof *port_gid);
                        if (cmp < 0)
                                link = &(*link)->rb_left;
                        else if (cmp > 0)
                                link = &(*link)->rb_right;
                        else
                                return cur_cm_id_priv;
                }
        }
        rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
        rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        return NULL;
}

static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
                               enum ib_cm_sidr_status status)
{
        struct ib_cm_sidr_rep_param param;

        memset(&param, 0, sizeof param);
        param.status = status;
        ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}

struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
                                 ib_cm_handler cm_handler,
                                 void *context)
{
        struct cm_id_private *cm_id_priv;
        int ret;

        cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
        if (!cm_id_priv)
                return ERR_PTR(-ENOMEM);

        cm_id_priv->id.state = IB_CM_IDLE;
        cm_id_priv->id.device = device;
        cm_id_priv->id.cm_handler = cm_handler;
        cm_id_priv->id.context = context;
        cm_id_priv->id.remote_cm_qpn = 1;
        ret = cm_alloc_id(cm_id_priv);
        if (ret)
                goto error;

        spin_lock_init(&cm_id_priv->lock);
        init_completion(&cm_id_priv->comp);
        INIT_LIST_HEAD(&cm_id_priv->work_list);
        atomic_set(&cm_id_priv->work_count, -1);
        atomic_set(&cm_id_priv->refcount, 1);
        return &cm_id_priv->id;

error:
        kfree(cm_id_priv);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_create_cm_id);

static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
        struct cm_work *work;

        if (list_empty(&cm_id_priv->work_list))
                return NULL;

        work = list_entry(cm_id_priv->work_list.next, struct cm_work, list);
        list_del(&work->list);
        return work;
}

static void cm_free_work(struct cm_work *work)
{
        if (work->mad_recv_wc)
                ib_free_recv_mad(work->mad_recv_wc);
        kfree(work);
}

static inline int cm_convert_to_ms(int iba_time)
{
        /* approximate conversion to ms from 4.096us x 2^iba_time */
        return 1 << max(iba_time - 8, 0);
}
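
/*
 * Worked example: iba_time = 20 encodes 4.096us * 2^20 ~= 4.3s; the
 * approximation above gives 1 << 12 = 4096 ms.  Dropping 8 bits
 * divides by 256, which works because 4.096us is close to 2^-8 ms
 * (3.9us).
 */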

/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
        int ack_timeout = packet_life_time + 1;

        if (ack_timeout >= ca_ack_delay)
                ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
        else
                ack_timeout = ca_ack_delay +
                              (ack_timeout >= (ca_ack_delay - 1));

        return min(31, ack_timeout);
}
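
/*
 * Example: ca_ack_delay = 15 and packet_life_time = 14 start with
 * ack_timeout = 15; since ca_ack_delay is within one step, the result
 * is bumped to 16, i.e. 4.096us * 2^16, which covers 2 * 2^14
 * (round trip) plus 2^15 (ack delay) exactly.
 */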

static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
        if (timewait_info->inserted_remote_id) {
                rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
                timewait_info->inserted_remote_id = 0;
        }

        if (timewait_info->inserted_remote_qp) {
                rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
                timewait_info->inserted_remote_qp = 0;
        }
}

static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
        struct cm_timewait_info *timewait_info;

        timewait_info = kzalloc(sizeof *timewait_info, GFP_KERNEL);
        if (!timewait_info)
                return ERR_PTR(-ENOMEM);

        timewait_info->work.local_id = local_id;
        INIT_DELAYED_WORK(&timewait_info->work.work, cm_work_handler);
        timewait_info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
        return timewait_info;
}

static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
        int wait_time;
        unsigned long flags;

        spin_lock_irqsave(&cm.lock, flags);
        cm_cleanup_timewait(cm_id_priv->timewait_info);
        list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
        spin_unlock_irqrestore(&cm.lock, flags);

        /*
         * The cm_id could be destroyed by the user before we exit timewait.
         * To protect against this, we search for the cm_id after exiting
         * timewait before notifying the user that we've exited timewait.
         */
        cm_id_priv->id.state = IB_CM_TIMEWAIT;
        wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
        queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
                           msecs_to_jiffies(wait_time));
        cm_id_priv->timewait_info = NULL;
}

static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
        unsigned long flags;

        cm_id_priv->id.state = IB_CM_IDLE;
        if (cm_id_priv->timewait_info) {
                spin_lock_irqsave(&cm.lock, flags);
                cm_cleanup_timewait(cm_id_priv->timewait_info);
                spin_unlock_irqrestore(&cm.lock, flags);
                kfree(cm_id_priv->timewait_info);
                cm_id_priv->timewait_info = NULL;
        }
}

static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
        spin_lock_irq(&cm_id_priv->lock);
        switch (cm_id->state) {
        case IB_CM_LISTEN:
                cm_id->state = IB_CM_IDLE;
                spin_unlock_irq(&cm_id_priv->lock);
                spin_lock_irq(&cm.lock);
                rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
                spin_unlock_irq(&cm.lock);
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id->state = IB_CM_IDLE;
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        case IB_CM_SIDR_REQ_RCVD:
                spin_unlock_irq(&cm_id_priv->lock);
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
                break;
        case IB_CM_REQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
                               &cm_id_priv->id.device->node_guid,
                               sizeof cm_id_priv->id.device->node_guid,
                               NULL, 0);
                break;
        case IB_CM_REQ_RCVD:
                if (err == -ENOMEM) {
                        /* Do not reject to allow future retries. */
                        cm_reset_to_idle(cm_id_priv);
                        spin_unlock_irq(&cm_id_priv->lock);
                } else {
                        spin_unlock_irq(&cm_id_priv->lock);
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                }
                break;
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                /* Fall through */
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                               NULL, 0, NULL, 0);
                break;
        case IB_CM_ESTABLISHED:
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_dreq(cm_id, NULL, 0);
                goto retest;
        case IB_CM_DREQ_SENT:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                cm_enter_timewait(cm_id_priv);
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        case IB_CM_DREQ_RCVD:
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        default:
                spin_unlock_irq(&cm_id_priv->lock);
                break;
        }

        cm_free_id(cm_id->local_id);
        cm_deref_id(cm_id_priv);
        wait_for_completion(&cm_id_priv->comp);
        while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
                cm_free_work(work);
        kfree(cm_id_priv->compare_data);
        kfree(cm_id_priv->private_data);
        kfree(cm_id_priv);
}

void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
        cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);

int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
                 struct ib_cm_compare_data *compare_data)
{
        struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
        unsigned long flags;
        int ret = 0;

        service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
        service_id &= service_mask;
        if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
            (service_id != IB_CM_ASSIGN_SERVICE_ID))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        if (cm_id->state != IB_CM_IDLE)
                return -EINVAL;

        if (compare_data) {
                cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
                                                   GFP_KERNEL);
                if (!cm_id_priv->compare_data)
                        return -ENOMEM;
                cm_mask_copy(cm_id_priv->compare_data->data,
                             compare_data->data, compare_data->mask);
                memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
                       IB_CM_COMPARE_SIZE);
        }

        cm_id->state = IB_CM_LISTEN;

        spin_lock_irqsave(&cm.lock, flags);
        if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
                cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
                cm_id->service_mask = ~cpu_to_be64(0);
        } else {
                cm_id->service_id = service_id;
                cm_id->service_mask = service_mask;
        }
        cur_cm_id_priv = cm_insert_listen(cm_id_priv);
        spin_unlock_irqrestore(&cm.lock, flags);

        if (cur_cm_id_priv) {
                cm_id->state = IB_CM_IDLE;
                kfree(cm_id_priv->compare_data);
                cm_id_priv->compare_data = NULL;
                ret = -EBUSY;
        }
        return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
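
/*
 * A minimal passive-side sketch (the handler name, context and service
 * ID below are illustrative, not part of this file):
 *
 *      struct ib_cm_id *id;
 *
 *      id = ib_create_cm_id(device, my_cm_handler, my_ctx);
 *      if (IS_ERR(id))
 *              return PTR_ERR(id);
 *      ret = ib_cm_listen(id, cpu_to_be64(0x1234), 0, NULL);
 *
 * Passing a zero service_mask, as above, is treated as "match all
 * bits"; incoming REQs for the service then arrive as
 * IB_CM_REQ_RECEIVED events in my_cm_handler.
 */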

static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
                          enum cm_msg_sequence msg_seq)
{
        u64 hi_tid, low_tid;

        hi_tid   = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
        low_tid  = (u64) ((__force u32)cm_id_priv->id.local_id |
                          (msg_seq << 30));
        return cpu_to_be64(hi_tid | low_tid);
}
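
/*
 * Resulting TID layout (sketch):
 *
 *      63                     32 31 30 29                     0
 *     +-------------------------+-----+-----------------------+
 *     |   mad_agent->hi_tid     | seq | local_id (low bits)   |
 *     +-------------------------+-----+-----------------------+
 *
 * Note that msg_seq is OR'd over the top two bits of local_id rather
 * than stored in a disjoint field.
 */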

static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
                              __be16 attr_id, __be64 tid)
{
        hdr->base_version  = IB_MGMT_BASE_VERSION;
        hdr->mgmt_class    = IB_MGMT_CLASS_CM;
        hdr->class_version = IB_CM_CLASS_VERSION;
        hdr->method        = IB_MGMT_METHOD_SEND;
        hdr->attr_id       = attr_id;
        hdr->tid           = tid;
}

static void cm_format_req(struct cm_req_msg *req_msg,
                          struct cm_id_private *cm_id_priv,
                          struct ib_cm_req_param *param)
{
        struct ib_sa_path_rec *pri_path = param->primary_path;
        struct ib_sa_path_rec *alt_path = param->alternate_path;

        cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
                          cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));

        req_msg->local_comm_id = cm_id_priv->id.local_id;
        req_msg->service_id = param->service_id;
        req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
        cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
        cm_req_set_resp_res(req_msg, param->responder_resources);
        cm_req_set_init_depth(req_msg, param->initiator_depth);
        cm_req_set_remote_resp_timeout(req_msg,
                                       param->remote_cm_response_timeout);
        cm_req_set_qp_type(req_msg, param->qp_type);
        cm_req_set_flow_ctrl(req_msg, param->flow_control);
        cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
        cm_req_set_local_resp_timeout(req_msg,
                                      param->local_cm_response_timeout);
        cm_req_set_retry_count(req_msg, param->retry_count);
        req_msg->pkey = param->primary_path->pkey;
        cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
        cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
        cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
        cm_req_set_srq(req_msg, param->srq);

        if (pri_path->hop_limit <= 1) {
                req_msg->primary_local_lid = pri_path->slid;
                req_msg->primary_remote_lid = pri_path->dlid;
        } else {
                /* Work-around until there's a way to obtain remote LID info */
                req_msg->primary_local_lid = IB_LID_PERMISSIVE;
                req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
        }
        req_msg->primary_local_gid = pri_path->sgid;
        req_msg->primary_remote_gid = pri_path->dgid;
        cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
        cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
        req_msg->primary_traffic_class = pri_path->traffic_class;
        req_msg->primary_hop_limit = pri_path->hop_limit;
        cm_req_set_primary_sl(req_msg, pri_path->sl);
        cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
        cm_req_set_primary_local_ack_timeout(req_msg,
                cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
                               pri_path->packet_life_time));

        if (alt_path) {
                if (alt_path->hop_limit <= 1) {
                        req_msg->alt_local_lid = alt_path->slid;
                        req_msg->alt_remote_lid = alt_path->dlid;
                } else {
                        req_msg->alt_local_lid = IB_LID_PERMISSIVE;
                        req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
                }
                req_msg->alt_local_gid = alt_path->sgid;
                req_msg->alt_remote_gid = alt_path->dgid;
                cm_req_set_alt_flow_label(req_msg,
                                          alt_path->flow_label);
                cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
                req_msg->alt_traffic_class = alt_path->traffic_class;
                req_msg->alt_hop_limit = alt_path->hop_limit;
                cm_req_set_alt_sl(req_msg, alt_path->sl);
                cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
                cm_req_set_alt_local_ack_timeout(req_msg,
                        cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
                                       alt_path->packet_life_time));
        }

        if (param->private_data && param->private_data_len)
                memcpy(req_msg->private_data, param->private_data,
                       param->private_data_len);
}

static int cm_validate_req_param(struct ib_cm_req_param *param)
{
        /* peer-to-peer not supported */
        if (param->peer_to_peer)
                return -EINVAL;

        if (!param->primary_path)
                return -EINVAL;

        if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC)
                return -EINVAL;

        if (param->private_data &&
            param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
                return -EINVAL;

        if (param->alternate_path &&
            (param->alternate_path->pkey != param->primary_path->pkey ||
             param->alternate_path->mtu != param->primary_path->mtu))
                return -EINVAL;

        return 0;
}

int ib_send_cm_req(struct ib_cm_id *cm_id,
                   struct ib_cm_req_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct cm_req_msg *req_msg;
        unsigned long flags;
        int ret;

        ret = cm_validate_req_param(param);
        if (ret)
                return ret;

        /* Verify that we're not in timewait. */
        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_IDLE) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                ret = -EINVAL;
                goto out;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
                                                            id.local_id);
        if (IS_ERR(cm_id_priv->timewait_info)) {
                ret = PTR_ERR(cm_id_priv->timewait_info);
                goto out;
        }

        ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
        if (ret)
                goto error1;
        if (param->alternate_path) {
                ret = cm_init_av_by_path(param->alternate_path,
                                         &cm_id_priv->alt_av);
                if (ret)
                        goto error1;
        }
        cm_id->service_id = param->service_id;
        cm_id->service_mask = ~cpu_to_be64(0);
        cm_id_priv->timeout_ms = cm_convert_to_ms(
                                    param->primary_path->packet_life_time) * 2 +
                                 cm_convert_to_ms(
                                    param->remote_cm_response_timeout);
        cm_id_priv->max_cm_retries = param->max_cm_retries;
        cm_id_priv->initiator_depth = param->initiator_depth;
        cm_id_priv->responder_resources = param->responder_resources;
        cm_id_priv->retry_count = param->retry_count;
        cm_id_priv->path_mtu = param->primary_path->mtu;
        cm_id_priv->pkey = param->primary_path->pkey;
        cm_id_priv->qp_type = param->qp_type;

        ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
        if (ret)
                goto error1;

        req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
        cm_format_req(req_msg, cm_id_priv, param);
        cm_id_priv->tid = req_msg->hdr.tid;
        cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
        cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;

        cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
        cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        ret = ib_post_send_mad(cm_id_priv->msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                goto error2;
        }
        BUG_ON(cm_id->state != IB_CM_IDLE);
        cm_id->state = IB_CM_REQ_SENT;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return 0;

error2: cm_free_msg(cm_id_priv->msg);
error1: kfree(cm_id_priv->timewait_info);
out:    return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
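
/*
 * A minimal active-side sketch, assuming the caller has already
 * resolved a path record (e.g. via ib_sa_path_rec_get()) and created a
 * QP; the names path, qp and my_cm_handler below are illustrative:
 *
 *      struct ib_cm_req_param req = {
 *              .primary_path                = &path,
 *              .service_id                  = cpu_to_be64(0x1234),
 *              .qp_num                      = qp->qp_num,
 *              .qp_type                     = IB_QPT_RC,
 *              .responder_resources         = 4,
 *              .initiator_depth             = 4,
 *              .remote_cm_response_timeout  = 20,
 *              .local_cm_response_timeout   = 20,
 *              .retry_count                 = 7,
 *              .rnr_retry_count             = 7,
 *              .max_cm_retries              = 15,
 *      };
 *
 *      id = ib_create_cm_id(device, my_cm_handler, my_ctx);
 *      ret = ib_send_cm_req(id, &req);
 *
 * The REP (or REJ/timeout) then arrives asynchronously through
 * my_cm_handler.
 */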

static int cm_issue_rej(struct cm_port *port,
                        struct ib_mad_recv_wc *mad_recv_wc,
                        enum ib_cm_rej_reason reason,
                        enum cm_msg_response msg_rejected,
                        void *ari, u8 ari_length)
{
        struct ib_mad_send_buf *msg = NULL;
        struct cm_rej_msg *rej_msg, *rcv_msg;
        int ret;

        ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
        if (ret)
                return ret;

        /* We just need common CM header information.  Cast to any message. */
        rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
        rej_msg = (struct cm_rej_msg *) msg->mad;

        cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
        rej_msg->remote_comm_id = rcv_msg->local_comm_id;
        rej_msg->local_comm_id = rcv_msg->remote_comm_id;
        cm_rej_set_msg_rejected(rej_msg, msg_rejected);
        rej_msg->reason = cpu_to_be16(reason);

        if (ari && ari_length) {
                cm_rej_set_reject_info_len(rej_msg, ari_length);
                memcpy(rej_msg->ari, ari, ari_length);
        }

        ret = ib_post_send_mad(msg, NULL);
        if (ret)
                cm_free_msg(msg);

        return ret;
}

static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
                                    __be32 local_qpn, __be32 remote_qpn)
{
        return (be64_to_cpu(local_ca_guid) > be64_to_cpu(remote_ca_guid) ||
                ((local_ca_guid == remote_ca_guid) &&
                 (be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn))));
}

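/*
 * Note the deliberate swap below: the REQ's "local" fields describe
 * its sender (our remote peer), so they become the dgid/dlid of the
 * paths built on the passive side, while the "remote" fields become
 * sgid/slid.
 */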
1224 static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
1225                                             struct ib_sa_path_rec *primary_path,
1226                                             struct ib_sa_path_rec *alt_path)
1227 {
1228         memset(primary_path, 0, sizeof *primary_path);
1229         primary_path->dgid = req_msg->primary_local_gid;
1230         primary_path->sgid = req_msg->primary_remote_gid;
1231         primary_path->dlid = req_msg->primary_local_lid;
1232         primary_path->slid = req_msg->primary_remote_lid;
1233         primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
1234         primary_path->hop_limit = req_msg->primary_hop_limit;
1235         primary_path->traffic_class = req_msg->primary_traffic_class;
1236         primary_path->reversible = 1;
1237         primary_path->pkey = req_msg->pkey;
1238         primary_path->sl = cm_req_get_primary_sl(req_msg);
1239         primary_path->mtu_selector = IB_SA_EQ;
1240         primary_path->mtu = cm_req_get_path_mtu(req_msg);
1241         primary_path->rate_selector = IB_SA_EQ;
1242         primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
1243         primary_path->packet_life_time_selector = IB_SA_EQ;
1244         primary_path->packet_life_time =
1245                 cm_req_get_primary_local_ack_timeout(req_msg);
1246         primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
1247
1248         if (req_msg->alt_local_lid) {
1249                 memset(alt_path, 0, sizeof *alt_path);
1250                 alt_path->dgid = req_msg->alt_local_gid;
1251                 alt_path->sgid = req_msg->alt_remote_gid;
1252                 alt_path->dlid = req_msg->alt_local_lid;
1253                 alt_path->slid = req_msg->alt_remote_lid;
1254                 alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
1255                 alt_path->hop_limit = req_msg->alt_hop_limit;
1256                 alt_path->traffic_class = req_msg->alt_traffic_class;
1257                 alt_path->reversible = 1;
1258                 alt_path->pkey = req_msg->pkey;
1259                 alt_path->sl = cm_req_get_alt_sl(req_msg);
1260                 alt_path->mtu_selector = IB_SA_EQ;
1261                 alt_path->mtu = cm_req_get_path_mtu(req_msg);
1262                 alt_path->rate_selector = IB_SA_EQ;
1263                 alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
1264                 alt_path->packet_life_time_selector = IB_SA_EQ;
1265                 alt_path->packet_life_time =
1266                         cm_req_get_alt_local_ack_timeout(req_msg);
1267                 alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
1268         }
1269 }
1270
1271 static void cm_format_req_event(struct cm_work *work,
1272                                 struct cm_id_private *cm_id_priv,
1273                                 struct ib_cm_id *listen_id)
1274 {
1275         struct cm_req_msg *req_msg;
1276         struct ib_cm_req_event_param *param;
1277
1278         req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1279         param = &work->cm_event.param.req_rcvd;
1280         param->listen_id = listen_id;
1281         param->port = cm_id_priv->av.port->port_num;
1282         param->primary_path = &work->path[0];
1283         if (req_msg->alt_local_lid)
1284                 param->alternate_path = &work->path[1];
1285         else
1286                 param->alternate_path = NULL;
1287         param->remote_ca_guid = req_msg->local_ca_guid;
1288         param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
1289         param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
1290         param->qp_type = cm_req_get_qp_type(req_msg);
1291         param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
1292         param->responder_resources = cm_req_get_init_depth(req_msg);
1293         param->initiator_depth = cm_req_get_resp_res(req_msg);
1294         param->local_cm_response_timeout =
1295                                         cm_req_get_remote_resp_timeout(req_msg);
1296         param->flow_control = cm_req_get_flow_ctrl(req_msg);
1297         param->remote_cm_response_timeout =
1298                                         cm_req_get_local_resp_timeout(req_msg);
1299         param->retry_count = cm_req_get_retry_count(req_msg);
1300         param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1301         param->srq = cm_req_get_srq(req_msg);
1302         work->cm_event.private_data = &req_msg->private_data;
1303 }
1304
1305 static void cm_process_work(struct cm_id_private *cm_id_priv,
1306                             struct cm_work *work)
1307 {
1308         int ret;
1309
1310         /* We will typically only have the current event to report. */
1311         ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
1312         cm_free_work(work);
1313
1314         while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
1315                 spin_lock_irq(&cm_id_priv->lock);
1316                 work = cm_dequeue_work(cm_id_priv);
1317                 spin_unlock_irq(&cm_id_priv->lock);
1318                 BUG_ON(!work);
1319                 ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
1320                                                 &work->cm_event);
1321                 cm_free_work(work);
1322         }
1323         cm_deref_id(cm_id_priv);
1324         if (ret)
1325                 cm_destroy_id(&cm_id_priv->id, ret);
1326 }
1327
1328 static void cm_format_mra(struct cm_mra_msg *mra_msg,
1329                           struct cm_id_private *cm_id_priv,
1330                           enum cm_msg_response msg_mraed, u8 service_timeout,
1331                           const void *private_data, u8 private_data_len)
1332 {
1333         cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
1334         cm_mra_set_msg_mraed(mra_msg, msg_mraed);
1335         mra_msg->local_comm_id = cm_id_priv->id.local_id;
1336         mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
1337         cm_mra_set_service_timeout(mra_msg, service_timeout);
1338
1339         if (private_data && private_data_len)
1340                 memcpy(mra_msg->private_data, private_data, private_data_len);
1341 }
1342
1343 static void cm_format_rej(struct cm_rej_msg *rej_msg,
1344                           struct cm_id_private *cm_id_priv,
1345                           enum ib_cm_rej_reason reason,
1346                           void *ari,
1347                           u8 ari_length,
1348                           const void *private_data,
1349                           u8 private_data_len)
1350 {
1351         cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
1352         rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
1353
1354         switch(cm_id_priv->id.state) {
1355         case IB_CM_REQ_RCVD:
1356                 rej_msg->local_comm_id = 0;
1357                 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1358                 break;
1359         case IB_CM_MRA_REQ_SENT:
1360                 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1361                 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
1362                 break;
1363         case IB_CM_REP_RCVD:
1364         case IB_CM_MRA_REP_SENT:
1365                 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1366                 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
1367                 break;
1368         default:
1369                 rej_msg->local_comm_id = cm_id_priv->id.local_id;
1370                 cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
1371                 break;
1372         }
1373
1374         rej_msg->reason = cpu_to_be16(reason);
1375         if (ari && ari_length) {
1376                 cm_rej_set_reject_info_len(rej_msg, ari_length);
1377                 memcpy(rej_msg->ari, ari, ari_length);
1378         }
1379
1380         if (private_data && private_data_len)
1381                 memcpy(rej_msg->private_data, private_data, private_data_len);
1382 }
1383
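/*
 * A duplicate REQ was received: re-send the outstanding MRA, or a
 * stale-connection REJ if the cm_id is already in timewait.
 */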
1384 static void cm_dup_req_handler(struct cm_work *work,
1385                                struct cm_id_private *cm_id_priv)
1386 {
1387         struct ib_mad_send_buf *msg = NULL;
1388         int ret;
1389
1390         atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
1391                         counter[CM_REQ_COUNTER]);
1392
1393         /* Quick state check to discard duplicate REQs. */
1394         if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
1395                 return;
1396
1397         ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1398         if (ret)
1399                 return;
1400
1401         spin_lock_irq(&cm_id_priv->lock);
1402         switch (cm_id_priv->id.state) {
1403         case IB_CM_MRA_REQ_SENT:
1404                 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1405                               CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
1406                               cm_id_priv->private_data,
1407                               cm_id_priv->private_data_len);
1408                 break;
1409         case IB_CM_TIMEWAIT:
1410                 cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
1411                               IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
1412                 break;
1413         default:
1414                 goto unlock;
1415         }
1416         spin_unlock_irq(&cm_id_priv->lock);
1417
1418         ret = ib_post_send_mad(msg, NULL);
1419         if (ret)
1420                 goto free;
1421         return;
1422
1423 unlock: spin_unlock_irq(&cm_id_priv->lock);
1424 free:   cm_free_msg(msg);
1425 }
1426
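/*
 * Match an incoming REQ against the listen service table.  Duplicate
 * REQs and stale connections are detected via the remote ID/QPN
 * timewait tables and rejected here; on success the matching listener
 * is returned with an elevated reference count.
 */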
1427 static struct cm_id_private * cm_match_req(struct cm_work *work,
1428                                            struct cm_id_private *cm_id_priv)
1429 {
1430         struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
1431         struct cm_timewait_info *timewait_info;
1432         struct cm_req_msg *req_msg;
1433
1434         req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1435
1436         /* Check for possible duplicate REQ. */
1437         spin_lock_irq(&cm.lock);
1438         timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
1439         if (timewait_info) {
1440                 cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
1441                                            timewait_info->work.remote_id);
1442                 spin_unlock_irq(&cm.lock);
1443                 if (cur_cm_id_priv) {
1444                         cm_dup_req_handler(work, cur_cm_id_priv);
1445                         cm_deref_id(cur_cm_id_priv);
1446                 }
1447                 return NULL;
1448         }
1449
1450         /* Check for stale connections. */
1451         timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
1452         if (timewait_info) {
1453                 cm_cleanup_timewait(cm_id_priv->timewait_info);
1454                 spin_unlock_irq(&cm.lock);
1455                 cm_issue_rej(work->port, work->mad_recv_wc,
1456                              IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
1457                              NULL, 0);
1458                 return NULL;
1459         }
1460
1461         /* Find matching listen request. */
1462         listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
1463                                            req_msg->service_id,
1464                                            req_msg->private_data);
1465         if (!listen_cm_id_priv) {
1466                 cm_cleanup_timewait(cm_id_priv->timewait_info);
1467                 spin_unlock_irq(&cm.lock);
1468                 cm_issue_rej(work->port, work->mad_recv_wc,
1469                              IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
1470                              NULL, 0);
1471                 goto out;
1472         }
1473         atomic_inc(&listen_cm_id_priv->refcount);
1474         atomic_inc(&cm_id_priv->refcount);
1475         cm_id_priv->id.state = IB_CM_REQ_RCVD;
1476         atomic_inc(&cm_id_priv->work_count);
1477         spin_unlock_irq(&cm.lock);
1478 out:
1479         return listen_cm_id_priv;
1480 }
1481
1482 /*
1483  * Work-around for inter-subnet connections.  If the LIDs are permissive,
1484  * we need to override the LID/SL data in the REQ with the LID information
1485  * in the work completion.
1486  */
1487 static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
1488 {
1489         if (!cm_req_get_primary_subnet_local(req_msg)) {
1490                 if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
1491                         req_msg->primary_local_lid = cpu_to_be16(wc->slid);
1492                         cm_req_set_primary_sl(req_msg, wc->sl);
1493                 }
1494
1495                 if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
1496                         req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
1497         }
1498
1499         if (!cm_req_get_alt_subnet_local(req_msg)) {
1500                 if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
1501                         req_msg->alt_local_lid = cpu_to_be16(wc->slid);
1502                         cm_req_set_alt_sl(req_msg, wc->sl);
1503                 }
1504
1505                 if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
1506                         req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
1507         }
1508 }
1509
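/*
 * Handle a new connection request: create the passive-side cm_id,
 * initialize its address vectors from the REQ paths, and pass the
 * request to the matched listener's handler.
 */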
1510 static int cm_req_handler(struct cm_work *work)
1511 {
1512         struct ib_cm_id *cm_id;
1513         struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
1514         struct cm_req_msg *req_msg;
1515         int ret;
1516
1517         req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
1518
1519         cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
1520         if (IS_ERR(cm_id))
1521                 return PTR_ERR(cm_id);
1522
1523         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1524         cm_id_priv->id.remote_id = req_msg->local_comm_id;
1525         cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
1526                                 work->mad_recv_wc->recv_buf.grh,
1527                                 &cm_id_priv->av);
1528         cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
1529                                                             id.local_id);
1530         if (IS_ERR(cm_id_priv->timewait_info)) {
1531                 ret = PTR_ERR(cm_id_priv->timewait_info);
1532                 goto destroy;
1533         }
1534         cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
1535         cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
1536         cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
1537
1538         listen_cm_id_priv = cm_match_req(work, cm_id_priv);
1539         if (!listen_cm_id_priv) {
1540                 ret = -EINVAL;
1541                 kfree(cm_id_priv->timewait_info);
1542                 goto destroy;
1543         }
1544
1545         cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
1546         cm_id_priv->id.context = listen_cm_id_priv->id.context;
1547         cm_id_priv->id.service_id = req_msg->service_id;
1548         cm_id_priv->id.service_mask = ~cpu_to_be64(0);
1549
1550         cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
1551         cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
1552         ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
1553         if (ret) {
1554                 ib_get_cached_gid(work->port->cm_dev->ib_device,
1555                                   work->port->port_num, 0, &work->path[0].sgid);
1556                 ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
1557                                &work->path[0].sgid, sizeof work->path[0].sgid,
1558                                NULL, 0);
1559                 goto rejected;
1560         }
1561         if (req_msg->alt_local_lid) {
1562                 ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
1563                 if (ret) {
1564                         ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
1565                                        &work->path[0].sgid,
1566                                        sizeof work->path[0].sgid, NULL, 0);
1567                         goto rejected;
1568                 }
1569         }
1570         cm_id_priv->tid = req_msg->hdr.tid;
1571         cm_id_priv->timeout_ms = cm_convert_to_ms(
1572                                         cm_req_get_local_resp_timeout(req_msg));
1573         cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
1574         cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
1575         cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
1576         cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
1577         cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
1578         cm_id_priv->pkey = req_msg->pkey;
1579         cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
1580         cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
1581         cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
1582         cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
1583
1584         cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
1585         cm_process_work(cm_id_priv, work);
1586         cm_deref_id(listen_cm_id_priv);
1587         return 0;
1588
1589 rejected:
1590         atomic_dec(&cm_id_priv->refcount);
1591         cm_deref_id(listen_cm_id_priv);
1592 destroy:
1593         ib_destroy_cm_id(cm_id);
1594         return ret;
1595 }
1596
1597 static void cm_format_rep(struct cm_rep_msg *rep_msg,
1598                           struct cm_id_private *cm_id_priv,
1599                           struct ib_cm_rep_param *param)
1600 {
1601         cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
1602         rep_msg->local_comm_id = cm_id_priv->id.local_id;
1603         rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
1604         cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
1605         cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
1606         rep_msg->resp_resources = param->responder_resources;
1607         rep_msg->initiator_depth = param->initiator_depth;
1608         cm_rep_set_target_ack_delay(rep_msg,
1609                                     cm_id_priv->av.port->cm_dev->ack_delay);
1610         cm_rep_set_failover(rep_msg, param->failover_accepted);
1611         cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
1612         cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
1613         cm_rep_set_srq(rep_msg, param->srq);
1614         rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
1615
1616         if (param->private_data && param->private_data_len)
1617                 memcpy(rep_msg->private_data, param->private_data,
1618                        param->private_data_len);
1619 }
1620
1621 int ib_send_cm_rep(struct ib_cm_id *cm_id,
1622                    struct ib_cm_rep_param *param)
1623 {
1624         struct cm_id_private *cm_id_priv;
1625         struct ib_mad_send_buf *msg;
1626         struct cm_rep_msg *rep_msg;
1627         unsigned long flags;
1628         int ret;
1629
1630         if (param->private_data &&
1631             param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
1632                 return -EINVAL;
1633
1634         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1635         spin_lock_irqsave(&cm_id_priv->lock, flags);
1636         if (cm_id->state != IB_CM_REQ_RCVD &&
1637             cm_id->state != IB_CM_MRA_REQ_SENT) {
1638                 ret = -EINVAL;
1639                 goto out;
1640         }
1641
1642         ret = cm_alloc_msg(cm_id_priv, &msg);
1643         if (ret)
1644                 goto out;
1645
1646         rep_msg = (struct cm_rep_msg *) msg->mad;
1647         cm_format_rep(rep_msg, cm_id_priv, param);
1648         msg->timeout_ms = cm_id_priv->timeout_ms;
1649         msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
1650
1651         ret = ib_post_send_mad(msg, NULL);
1652         if (ret) {
1653                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1654                 cm_free_msg(msg);
1655                 return ret;
1656         }
1657
1658         cm_id->state = IB_CM_REP_SENT;
1659         cm_id_priv->msg = msg;
1660         cm_id_priv->initiator_depth = param->initiator_depth;
1661         cm_id_priv->responder_resources = param->responder_resources;
1662         cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
1663         cm_id_priv->local_qpn = cm_rep_get_local_qpn(rep_msg);
1664
1665 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1666         return ret;
1667 }
1668 EXPORT_SYMBOL(ib_send_cm_rep);
1669
1670 static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
1671                           struct cm_id_private *cm_id_priv,
1672                           const void *private_data,
1673                           u8 private_data_len)
1674 {
1675         cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
1676         rtu_msg->local_comm_id = cm_id_priv->id.local_id;
1677         rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
1678
1679         if (private_data && private_data_len)
1680                 memcpy(rtu_msg->private_data, private_data, private_data_len);
1681 }
1682
1683 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
1684                    const void *private_data,
1685                    u8 private_data_len)
1686 {
1687         struct cm_id_private *cm_id_priv;
1688         struct ib_mad_send_buf *msg;
1689         unsigned long flags;
1690         void *data;
1691         int ret;
1692
1693         if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
1694                 return -EINVAL;
1695
1696         data = cm_copy_private_data(private_data, private_data_len);
1697         if (IS_ERR(data))
1698                 return PTR_ERR(data);
1699
1700         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1701         spin_lock_irqsave(&cm_id_priv->lock, flags);
1702         if (cm_id->state != IB_CM_REP_RCVD &&
1703             cm_id->state != IB_CM_MRA_REP_SENT) {
1704                 ret = -EINVAL;
1705                 goto error;
1706         }
1707
1708         ret = cm_alloc_msg(cm_id_priv, &msg);
1709         if (ret)
1710                 goto error;
1711
1712         cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1713                       private_data, private_data_len);
1714
1715         ret = ib_post_send_mad(msg, NULL);
1716         if (ret) {
1717                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1718                 cm_free_msg(msg);
1719                 kfree(data);
1720                 return ret;
1721         }
1722
1723         cm_id->state = IB_CM_ESTABLISHED;
1724         cm_set_private_data(cm_id_priv, data, private_data_len);
1725         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1726         return 0;
1727
1728 error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
1729         kfree(data);
1730         return ret;
1731 }
1732 EXPORT_SYMBOL(ib_send_cm_rtu);
1733
1734 static void cm_format_rep_event(struct cm_work *work)
1735 {
1736         struct cm_rep_msg *rep_msg;
1737         struct ib_cm_rep_event_param *param;
1738
1739         rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1740         param = &work->cm_event.param.rep_rcvd;
1741         param->remote_ca_guid = rep_msg->local_ca_guid;
1742         param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
1743         param->remote_qpn = be32_to_cpu(cm_rep_get_local_qpn(rep_msg));
1744         param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
1745         param->responder_resources = rep_msg->initiator_depth;
1746         param->initiator_depth = rep_msg->resp_resources;
1747         param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1748         param->failover_accepted = cm_rep_get_failover(rep_msg);
1749         param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
1750         param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1751         param->srq = cm_rep_get_srq(rep_msg);
1752         work->cm_event.private_data = &rep_msg->private_data;
1753 }
1754
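/*
 * A duplicate REP was received: re-send the RTU if the connection is
 * already established, or the outstanding MRA if one was sent.
 */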
1755 static void cm_dup_rep_handler(struct cm_work *work)
1756 {
1757         struct cm_id_private *cm_id_priv;
1758         struct cm_rep_msg *rep_msg;
1759         struct ib_mad_send_buf *msg = NULL;
1760         int ret;
1761
1762         rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
1763         cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
1764                                    rep_msg->local_comm_id);
1765         if (!cm_id_priv)
1766                 return;
1767
1768         atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
1769                         counter[CM_REP_COUNTER]);
1770         ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
1771         if (ret)
1772                 goto deref;
1773
1774         spin_lock_irq(&cm_id_priv->lock);
1775         if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
1776                 cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
1777                               cm_id_priv->private_data,
1778                               cm_id_priv->private_data_len);
1779         else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
1780                 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
1781                               CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
1782                               cm_id_priv->private_data,
1783                               cm_id_priv->private_data_len);
1784         else
1785                 goto unlock;
1786         spin_unlock_irq(&cm_id_priv->lock);
1787
1788         ret = ib_post_send_mad(msg, NULL);
1789         if (ret)
1790                 goto free;
1791         goto deref;
1792
1793 unlock: spin_unlock_irq(&cm_id_priv->lock);
1794 free:   cm_free_msg(msg);
1795 deref:  cm_deref_id(cm_id_priv);
1796 }
1797
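/*
 * Handle a REP on the active side: record the remote connection
 * parameters, detect duplicate REPs and stale connections, and move
 * the cm_id to IB_CM_REP_RCVD before reporting the event.
 */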
1798 static int cm_rep_handler(struct cm_work *work)
1799 {
1800         struct cm_id_private *cm_id_priv;
1801         struct cm_rep_msg *rep_msg;
1802         int ret;
1803
1804         rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
1805         cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
1806         if (!cm_id_priv) {
1807                 cm_dup_rep_handler(work);
1808                 return -EINVAL;
1809         }
1810
1811         cm_format_rep_event(work);
1812
1813         spin_lock_irq(&cm_id_priv->lock);
1814         switch (cm_id_priv->id.state) {
1815         case IB_CM_REQ_SENT:
1816         case IB_CM_MRA_REQ_RCVD:
1817                 break;
1818         default:
1819                 spin_unlock_irq(&cm_id_priv->lock);
1820                 ret = -EINVAL;
1821                 goto error;
1822         }
1823
1824         cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
1825         cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
1826         cm_id_priv->timewait_info->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1827
1828         spin_lock(&cm.lock);
1829         /* Check for duplicate REP. */
1830         if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
1831                 spin_unlock(&cm.lock);
1832                 spin_unlock_irq(&cm_id_priv->lock);
1833                 ret = -EINVAL;
1834                 goto error;
1835         }
1836         /* Check for a stale connection. */
1837         if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
1838                 rb_erase(&cm_id_priv->timewait_info->remote_id_node,
1839                          &cm.remote_id_table);
1840                 cm_id_priv->timewait_info->inserted_remote_id = 0;
1841                 spin_unlock(&cm.lock);
1842                 spin_unlock_irq(&cm_id_priv->lock);
1843                 cm_issue_rej(work->port, work->mad_recv_wc,
1844                              IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
1845                              NULL, 0);
1846                 ret = -EINVAL;
1847                 goto error;
1848         }
1849         spin_unlock(&cm.lock);
1850
1851         cm_id_priv->id.state = IB_CM_REP_RCVD;
1852         cm_id_priv->id.remote_id = rep_msg->local_comm_id;
1853         cm_id_priv->remote_qpn = cm_rep_get_local_qpn(rep_msg);
1854         cm_id_priv->initiator_depth = rep_msg->resp_resources;
1855         cm_id_priv->responder_resources = rep_msg->initiator_depth;
1856         cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
1857         cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
1858         cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
1859         cm_id_priv->av.timeout =
1860                         cm_ack_timeout(cm_id_priv->target_ack_delay,
1861                                        cm_id_priv->av.timeout - 1);
1862         cm_id_priv->alt_av.timeout =
1863                         cm_ack_timeout(cm_id_priv->target_ack_delay,
1864                                        cm_id_priv->alt_av.timeout - 1);
1865
1866         /* todo: handle peer_to_peer */
1867
1868         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1869         ret = atomic_inc_and_test(&cm_id_priv->work_count);
1870         if (!ret)
1871                 list_add_tail(&work->list, &cm_id_priv->work_list);
1872         spin_unlock_irq(&cm_id_priv->lock);
1873
1874         if (ret)
1875                 cm_process_work(cm_id_priv, work);
1876         else
1877                 cm_deref_id(cm_id_priv);
1878         return 0;
1879
1880 error:
1881         cm_deref_id(cm_id_priv);
1882         return ret;
1883 }
1884
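/*
 * Deliver the established event queued by cm_establish() once the
 * cm_id has reached IB_CM_ESTABLISHED.
 */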
1885 static int cm_establish_handler(struct cm_work *work)
1886 {
1887         struct cm_id_private *cm_id_priv;
1888         int ret;
1889
1890         /* See comment in cm_establish about lookup. */
1891         cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
1892         if (!cm_id_priv)
1893                 return -EINVAL;
1894
1895         spin_lock_irq(&cm_id_priv->lock);
1896         if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
1897                 spin_unlock_irq(&cm_id_priv->lock);
1898                 goto out;
1899         }
1900
1901         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1902         ret = atomic_inc_and_test(&cm_id_priv->work_count);
1903         if (!ret)
1904                 list_add_tail(&work->list, &cm_id_priv->work_list);
1905         spin_unlock_irq(&cm_id_priv->lock);
1906
1907         if (ret)
1908                 cm_process_work(cm_id_priv, work);
1909         else
1910                 cm_deref_id(cm_id_priv);
1911         return 0;
1912 out:
1913         cm_deref_id(cm_id_priv);
1914         return -EINVAL;
1915 }
1916
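/*
 * Handle an RTU: stop retransmitting the REP and move the passive side
 * to IB_CM_ESTABLISHED.
 */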
1917 static int cm_rtu_handler(struct cm_work *work)
1918 {
1919         struct cm_id_private *cm_id_priv;
1920         struct cm_rtu_msg *rtu_msg;
1921         int ret;
1922
1923         rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
1924         cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
1925                                    rtu_msg->local_comm_id);
1926         if (!cm_id_priv)
1927                 return -EINVAL;
1928
1929         work->cm_event.private_data = &rtu_msg->private_data;
1930
1931         spin_lock_irq(&cm_id_priv->lock);
1932         if (cm_id_priv->id.state != IB_CM_REP_SENT &&
1933             cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
1934                 spin_unlock_irq(&cm_id_priv->lock);
1935                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
1936                                 counter[CM_RTU_COUNTER]);
1937                 goto out;
1938         }
1939         cm_id_priv->id.state = IB_CM_ESTABLISHED;
1940
1941         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1942         ret = atomic_inc_and_test(&cm_id_priv->work_count);
1943         if (!ret)
1944                 list_add_tail(&work->list, &cm_id_priv->work_list);
1945         spin_unlock_irq(&cm_id_priv->lock);
1946
1947         if (ret)
1948                 cm_process_work(cm_id_priv, work);
1949         else
1950                 cm_deref_id(cm_id_priv);
1951         return 0;
1952 out:
1953         cm_deref_id(cm_id_priv);
1954         return -EINVAL;
1955 }
1956
1957 static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
1958                           struct cm_id_private *cm_id_priv,
1959                           const void *private_data,
1960                           u8 private_data_len)
1961 {
1962         cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
1963                           cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
1964         dreq_msg->local_comm_id = cm_id_priv->id.local_id;
1965         dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
1966         cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
1967
1968         if (private_data && private_data_len)
1969                 memcpy(dreq_msg->private_data, private_data, private_data_len);
1970 }
1971
1972 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
1973                     const void *private_data,
1974                     u8 private_data_len)
1975 {
1976         struct cm_id_private *cm_id_priv;
1977         struct ib_mad_send_buf *msg;
1978         unsigned long flags;
1979         int ret;
1980
1981         if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
1982                 return -EINVAL;
1983
1984         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
1985         spin_lock_irqsave(&cm_id_priv->lock, flags);
1986         if (cm_id->state != IB_CM_ESTABLISHED) {
1987                 ret = -EINVAL;
1988                 goto out;
1989         }
1990
1991         if (cm_id->lap_state == IB_CM_LAP_SENT ||
1992             cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
1993                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
1994
1995         ret = cm_alloc_msg(cm_id_priv, &msg);
1996         if (ret) {
1997                 cm_enter_timewait(cm_id_priv);
1998                 goto out;
1999         }
2000
2001         cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
2002                        private_data, private_data_len);
2003         msg->timeout_ms = cm_id_priv->timeout_ms;
2004         msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
2005
2006         ret = ib_post_send_mad(msg, NULL);
2007         if (ret) {
2008                 cm_enter_timewait(cm_id_priv);
2009                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2010                 cm_free_msg(msg);
2011                 return ret;
2012         }
2013
2014         cm_id->state = IB_CM_DREQ_SENT;
2015         cm_id_priv->msg = msg;
2016 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2017         return ret;
2018 }
2019 EXPORT_SYMBOL(ib_send_cm_dreq);
2020
2021 static void cm_format_drep(struct cm_drep_msg *drep_msg,
2022                           struct cm_id_private *cm_id_priv,
2023                           const void *private_data,
2024                           u8 private_data_len)
2025 {
2026         cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
2027         drep_msg->local_comm_id = cm_id_priv->id.local_id;
2028         drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
2029
2030         if (private_data && private_data_len)
2031                 memcpy(drep_msg->private_data, private_data, private_data_len);
2032 }
2033
2034 int ib_send_cm_drep(struct ib_cm_id *cm_id,
2035                     const void *private_data,
2036                     u8 private_data_len)
2037 {
2038         struct cm_id_private *cm_id_priv;
2039         struct ib_mad_send_buf *msg;
2040         unsigned long flags;
2041         void *data;
2042         int ret;
2043
2044         if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
2045                 return -EINVAL;
2046
2047         data = cm_copy_private_data(private_data, private_data_len);
2048         if (IS_ERR(data))
2049                 return PTR_ERR(data);
2050
2051         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2052         spin_lock_irqsave(&cm_id_priv->lock, flags);
2053         if (cm_id->state != IB_CM_DREQ_RCVD) {
2054                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2055                 kfree(data);
2056                 return -EINVAL;
2057         }
2058
2059         cm_set_private_data(cm_id_priv, data, private_data_len);
2060         cm_enter_timewait(cm_id_priv);
2061
2062         ret = cm_alloc_msg(cm_id_priv, &msg);
2063         if (ret)
2064                 goto out;
2065
2066         cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2067                        private_data, private_data_len);
2068
2069         ret = ib_post_send_mad(msg, NULL);
2070         if (ret) {
2071                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2072                 cm_free_msg(msg);
2073                 return ret;
2074         }
2075
2076 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2077         return ret;
2078 }
2079 EXPORT_SYMBOL(ib_send_cm_drep);
2080
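/*
 * Send a DREP directly from the received DREQ when no matching cm_id
 * exists, so the remote side can finish its disconnect.
 */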
2081 static int cm_issue_drep(struct cm_port *port,
2082                          struct ib_mad_recv_wc *mad_recv_wc)
2083 {
2084         struct ib_mad_send_buf *msg = NULL;
2085         struct cm_dreq_msg *dreq_msg;
2086         struct cm_drep_msg *drep_msg;
2087         int ret;
2088
2089         ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
2090         if (ret)
2091                 return ret;
2092
2093         dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
2094         drep_msg = (struct cm_drep_msg *) msg->mad;
2095
2096         cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
2097         drep_msg->remote_comm_id = dreq_msg->local_comm_id;
2098         drep_msg->local_comm_id = dreq_msg->remote_comm_id;
2099
2100         ret = ib_post_send_mad(msg, NULL);
2101         if (ret)
2102                 cm_free_msg(msg);
2103
2104         return ret;
2105 }
2106
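/*
 * Handle a DREQ: cancel any outstanding REP/DREQ/LAP retransmission,
 * re-send the DREP for connections already in timewait, and otherwise
 * move the cm_id to IB_CM_DREQ_RCVD.
 */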
2107 static int cm_dreq_handler(struct cm_work *work)
2108 {
2109         struct cm_id_private *cm_id_priv;
2110         struct cm_dreq_msg *dreq_msg;
2111         struct ib_mad_send_buf *msg = NULL;
2112         int ret;
2113
2114         dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
2115         cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
2116                                    dreq_msg->local_comm_id);
2117         if (!cm_id_priv) {
2118                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2119                                 counter[CM_DREQ_COUNTER]);
2120                 cm_issue_drep(work->port, work->mad_recv_wc);
2121                 return -EINVAL;
2122         }
2123
2124         work->cm_event.private_data = &dreq_msg->private_data;
2125
2126         spin_lock_irq(&cm_id_priv->lock);
2127         if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
2128                 goto unlock;
2129
2130         switch (cm_id_priv->id.state) {
2131         case IB_CM_REP_SENT:
2132         case IB_CM_DREQ_SENT:
2133                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2134                 break;
2135         case IB_CM_ESTABLISHED:
2136                 if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
2137                     cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2138                         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2139                 break;
2140         case IB_CM_MRA_REP_RCVD:
2141                 break;
2142         case IB_CM_TIMEWAIT:
2143                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2144                                 counter[CM_DREQ_COUNTER]);
2145                 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2146                         goto unlock;
2147
2148                 cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
2149                                cm_id_priv->private_data,
2150                                cm_id_priv->private_data_len);
2151                 spin_unlock_irq(&cm_id_priv->lock);
2152
2153                 if (ib_post_send_mad(msg, NULL))
2154                         cm_free_msg(msg);
2155                 goto deref;
2156         case IB_CM_DREQ_RCVD:
2157                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2158                                 counter[CM_DREQ_COUNTER]);
2159                 goto unlock;
2160         default:
2161                 goto unlock;
2162         }
2163         cm_id_priv->id.state = IB_CM_DREQ_RCVD;
2164         cm_id_priv->tid = dreq_msg->hdr.tid;
2165         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2166         if (!ret)
2167                 list_add_tail(&work->list, &cm_id_priv->work_list);
2168         spin_unlock_irq(&cm_id_priv->lock);
2169
2170         if (ret)
2171                 cm_process_work(cm_id_priv, work);
2172         else
2173                 cm_deref_id(cm_id_priv);
2174         return 0;
2175
2176 unlock: spin_unlock_irq(&cm_id_priv->lock);
2177 deref:  cm_deref_id(cm_id_priv);
2178         return -EINVAL;
2179 }
2180
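/*
 * Handle a DREP: stop retransmitting the DREQ and move the cm_id into
 * timewait.
 */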
2181 static int cm_drep_handler(struct cm_work *work)
2182 {
2183         struct cm_id_private *cm_id_priv;
2184         struct cm_drep_msg *drep_msg;
2185         int ret;
2186
2187         drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
2188         cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
2189                                    drep_msg->local_comm_id);
2190         if (!cm_id_priv)
2191                 return -EINVAL;
2192
2193         work->cm_event.private_data = &drep_msg->private_data;
2194
2195         spin_lock_irq(&cm_id_priv->lock);
2196         if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
2197             cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
2198                 spin_unlock_irq(&cm_id_priv->lock);
2199                 goto out;
2200         }
2201         cm_enter_timewait(cm_id_priv);
2202
2203         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2204         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2205         if (!ret)
2206                 list_add_tail(&work->list, &cm_id_priv->work_list);
2207         spin_unlock_irq(&cm_id_priv->lock);
2208
2209         if (ret)
2210                 cm_process_work(cm_id_priv, work);
2211         else
2212                 cm_deref_id(cm_id_priv);
2213         return 0;
2214 out:
2215         cm_deref_id(cm_id_priv);
2216         return -EINVAL;
2217 }
2218
2219 int ib_send_cm_rej(struct ib_cm_id *cm_id,
2220                    enum ib_cm_rej_reason reason,
2221                    void *ari,
2222                    u8 ari_length,
2223                    const void *private_data,
2224                    u8 private_data_len)
2225 {
2226         struct cm_id_private *cm_id_priv;
2227         struct ib_mad_send_buf *msg;
2228         unsigned long flags;
2229         int ret;
2230
2231         if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
2232             (ari && ari_length > IB_CM_REJ_ARI_LENGTH))
2233                 return -EINVAL;
2234
2235         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2236
2237         spin_lock_irqsave(&cm_id_priv->lock, flags);
2238         switch (cm_id->state) {
2239         case IB_CM_REQ_SENT:
2240         case IB_CM_MRA_REQ_RCVD:
2241         case IB_CM_REQ_RCVD:
2242         case IB_CM_MRA_REQ_SENT:
2243         case IB_CM_REP_RCVD:
2244         case IB_CM_MRA_REP_SENT:
2245                 ret = cm_alloc_msg(cm_id_priv, &msg);
2246                 if (!ret)
2247                         cm_format_rej((struct cm_rej_msg *) msg->mad,
2248                                       cm_id_priv, reason, ari, ari_length,
2249                                       private_data, private_data_len);
2250
2251                 cm_reset_to_idle(cm_id_priv);
2252                 break;
2253         case IB_CM_REP_SENT:
2254         case IB_CM_MRA_REP_RCVD:
2255                 ret = cm_alloc_msg(cm_id_priv, &msg);
2256                 if (!ret)
2257                         cm_format_rej((struct cm_rej_msg *) msg->mad,
2258                                       cm_id_priv, reason, ari, ari_length,
2259                                       private_data, private_data_len);
2260
2261                 cm_enter_timewait(cm_id_priv);
2262                 break;
2263         default:
2264                 ret = -EINVAL;
2265                 goto out;
2266         }
2267
2268         if (ret)
2269                 goto out;
2270
2271         ret = ib_post_send_mad(msg, NULL);
2272         if (ret)
2273                 cm_free_msg(msg);
2274
2275 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2276         return ret;
2277 }
2278 EXPORT_SYMBOL(ib_send_cm_rej);
2279
2280 static void cm_format_rej_event(struct cm_work *work)
2281 {
2282         struct cm_rej_msg *rej_msg;
2283         struct ib_cm_rej_event_param *param;
2284
2285         rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2286         param = &work->cm_event.param.rej_rcvd;
2287         param->ari = rej_msg->ari;
2288         param->ari_length = cm_rej_get_reject_info_len(rej_msg);
2289         param->reason = __be16_to_cpu(rej_msg->reason);
2290         work->cm_event.private_data = &rej_msg->private_data;
2291 }
2292
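/*
 * Find the cm_id targeted by a REJ.  A timeout REJ is matched through
 * the timewait table using the CA GUID carried in the ARI rather than
 * the comm IDs in the message.
 */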
2293 static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
2294 {
2295         struct cm_timewait_info *timewait_info;
2296         struct cm_id_private *cm_id_priv;
2297         __be32 remote_id;
2298
2299         remote_id = rej_msg->local_comm_id;
2300
2301         if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
2302                 spin_lock_irq(&cm.lock);
2303                 timewait_info = cm_find_remote_id(*((__be64 *) rej_msg->ari),
2304                                                  remote_id);
2305                 if (!timewait_info) {
2306                         spin_unlock_irq(&cm.lock);
2307                         return NULL;
2308                 }
2309                 cm_id_priv = idr_find(&cm.local_id_table, (__force int)
2310                                       (timewait_info->work.local_id ^
2311                                        cm.random_id_operand));
2312                 if (cm_id_priv) {
2313                         if (cm_id_priv->id.remote_id == remote_id)
2314                                 atomic_inc(&cm_id_priv->refcount);
2315                         else
2316                                 cm_id_priv = NULL;
2317                 }
2318                 spin_unlock_irq(&cm.lock);
2319         } else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
2320                 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
2321         else
2322                 cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
2323
2324         return cm_id_priv;
2325 }
2326
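/*
 * Handle a REJ: cancel any outstanding retransmission and reset the
 * cm_id to idle or move it to timewait, depending on how far the
 * connection had progressed.
 */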
2327 static int cm_rej_handler(struct cm_work *work)
2328 {
2329         struct cm_id_private *cm_id_priv;
2330         struct cm_rej_msg *rej_msg;
2331         int ret;
2332
2333         rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
2334         cm_id_priv = cm_acquire_rejected_id(rej_msg);
2335         if (!cm_id_priv)
2336                 return -EINVAL;
2337
2338         cm_format_rej_event(work);
2339
2340         spin_lock_irq(&cm_id_priv->lock);
2341         switch (cm_id_priv->id.state) {
2342         case IB_CM_REQ_SENT:
2343         case IB_CM_MRA_REQ_RCVD:
2344         case IB_CM_REP_SENT:
2345         case IB_CM_MRA_REP_RCVD:
2346                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2347                 /* fall through */
2348         case IB_CM_REQ_RCVD:
2349         case IB_CM_MRA_REQ_SENT:
2350                 if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
2351                         cm_enter_timewait(cm_id_priv);
2352                 else
2353                         cm_reset_to_idle(cm_id_priv);
2354                 break;
2355         case IB_CM_DREQ_SENT:
2356                 ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2357                 /* fall through */
2358         case IB_CM_REP_RCVD:
2359         case IB_CM_MRA_REP_SENT:
2360                 cm_enter_timewait(cm_id_priv);
2361                 break;
2362         case IB_CM_ESTABLISHED:
2363                 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
2364                     cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
2365                         if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
2366                                 ib_cancel_mad(cm_id_priv->av.port->mad_agent,
2367                                               cm_id_priv->msg);
2368                         cm_enter_timewait(cm_id_priv);
2369                         break;
2370                 }
2371                 /* fall through */
2372         default:
2373                 spin_unlock_irq(&cm_id_priv->lock);
2374                 ret = -EINVAL;
2375                 goto out;
2376         }
2377
2378         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2379         if (!ret)
2380                 list_add_tail(&work->list, &cm_id_priv->work_list);
2381         spin_unlock_irq(&cm_id_priv->lock);
2382
2383         if (ret)
2384                 cm_process_work(cm_id_priv, work);
2385         else
2386                 cm_deref_id(cm_id_priv);
2387         return 0;
2388 out:
2389         cm_deref_id(cm_id_priv);
2390         return -EINVAL;
2391 }
2392
2393 int ib_send_cm_mra(struct ib_cm_id *cm_id,
2394                    u8 service_timeout,
2395                    const void *private_data,
2396                    u8 private_data_len)
2397 {
2398         struct cm_id_private *cm_id_priv;
2399         struct ib_mad_send_buf *msg;
2400         enum ib_cm_state cm_state;
2401         enum ib_cm_lap_state lap_state;
2402         enum cm_msg_response msg_response;
2403         void *data;
2404         unsigned long flags;
2405         int ret;
2406
2407         if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
2408                 return -EINVAL;
2409
2410         data = cm_copy_private_data(private_data, private_data_len);
2411         if (IS_ERR(data))
2412                 return PTR_ERR(data);
2413
2414         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2415
2416         spin_lock_irqsave(&cm_id_priv->lock, flags);
2417         switch (cm_id_priv->id.state) {
2418         case IB_CM_REQ_RCVD:
2419                 cm_state = IB_CM_MRA_REQ_SENT;
2420                 lap_state = cm_id->lap_state;
2421                 msg_response = CM_MSG_RESPONSE_REQ;
2422                 break;
2423         case IB_CM_REP_RCVD:
2424                 cm_state = IB_CM_MRA_REP_SENT;
2425                 lap_state = cm_id->lap_state;
2426                 msg_response = CM_MSG_RESPONSE_REP;
2427                 break;
2428         case IB_CM_ESTABLISHED:
2429                 if (cm_id->lap_state == IB_CM_LAP_RCVD) {
2430                         cm_state = cm_id->state;
2431                         lap_state = IB_CM_MRA_LAP_SENT;
2432                         msg_response = CM_MSG_RESPONSE_OTHER;
2433                         break;
2434                 }
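                /* fall through */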
2435         default:
2436                 ret = -EINVAL;
2437                 goto error1;
2438         }
2439
2440         if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
2441                 ret = cm_alloc_msg(cm_id_priv, &msg);
2442                 if (ret)
2443                         goto error1;
2444
2445                 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2446                               msg_response, service_timeout,
2447                               private_data, private_data_len);
2448                 ret = ib_post_send_mad(msg, NULL);
2449                 if (ret)
2450                         goto error2;
2451         }
2452
2453         cm_id->state = cm_state;
2454         cm_id->lap_state = lap_state;
2455         cm_id_priv->service_timeout = service_timeout;
2456         cm_set_private_data(cm_id_priv, data, private_data_len);
2457         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2458         return 0;
2459
2460 error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2461         kfree(data);
2462         return ret;
2463
2464 error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2465         kfree(data);
2466         cm_free_msg(msg);
2467         return ret;
2468 }
2469 EXPORT_SYMBOL(ib_send_cm_mra);
2470
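/*
 * Find the cm_id an MRA refers to.  For an MRA of a REQ the sender's
 * comm ID is not yet known locally, so only the receiver's ID is
 * matched.
 */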
2471 static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
2472 {
2473         switch (cm_mra_get_msg_mraed(mra_msg)) {
2474         case CM_MSG_RESPONSE_REQ:
2475                 return cm_acquire_id(mra_msg->remote_comm_id, 0);
2476         case CM_MSG_RESPONSE_REP:
2477         case CM_MSG_RESPONSE_OTHER:
2478                 return cm_acquire_id(mra_msg->remote_comm_id,
2479                                      mra_msg->local_comm_id);
2480         default:
2481                 return NULL;
2482         }
2483 }
2484
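/*
 * Handle an MRA: extend the retransmission timeout of the outstanding
 * REQ, REP or LAP by the service timeout advertised by the peer.
 */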
2485 static int cm_mra_handler(struct cm_work *work)
2486 {
2487         struct cm_id_private *cm_id_priv;
2488         struct cm_mra_msg *mra_msg;
2489         int timeout, ret;
2490
2491         mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
2492         cm_id_priv = cm_acquire_mraed_id(mra_msg);
2493         if (!cm_id_priv)
2494                 return -EINVAL;
2495
2496         work->cm_event.private_data = &mra_msg->private_data;
2497         work->cm_event.param.mra_rcvd.service_timeout =
2498                                         cm_mra_get_service_timeout(mra_msg);
2499         timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
2500                   cm_convert_to_ms(cm_id_priv->av.timeout);
2501
2502         spin_lock_irq(&cm_id_priv->lock);
2503         switch (cm_id_priv->id.state) {
2504         case IB_CM_REQ_SENT:
2505                 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
2506                     ib_modify_mad(cm_id_priv->av.port->mad_agent,
2507                                   cm_id_priv->msg, timeout))
2508                         goto out;
2509                 cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
2510                 break;
2511         case IB_CM_REP_SENT:
2512                 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
2513                     ib_modify_mad(cm_id_priv->av.port->mad_agent,
2514                                   cm_id_priv->msg, timeout))
2515                         goto out;
2516                 cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
2517                 break;
2518         case IB_CM_ESTABLISHED:
2519                 if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
2520                     cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
2521                     ib_modify_mad(cm_id_priv->av.port->mad_agent,
2522                                   cm_id_priv->msg, timeout)) {
2523                         if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
2524                                 atomic_long_inc(&work->port->
2525                                                 counter_group[CM_RECV_DUPLICATES].
2526                                                 counter[CM_MRA_COUNTER]);
2527                         goto out;
2528                 }
2529                 cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
2530                 break;
2531         case IB_CM_MRA_REQ_RCVD:
2532         case IB_CM_MRA_REP_RCVD:
2533                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2534                                 counter[CM_MRA_COUNTER]);
2535                 /* fall through */
2536         default:
2537                 goto out;
2538         }
2539
2540         cm_id_priv->msg->context[1] = (void *) (unsigned long)
2541                                       cm_id_priv->id.state;
2542         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2543         if (!ret)
2544                 list_add_tail(&work->list, &cm_id_priv->work_list);
2545         spin_unlock_irq(&cm_id_priv->lock);
2546
2547         if (ret)
2548                 cm_process_work(cm_id_priv, work);
2549         else
2550                 cm_deref_id(cm_id_priv);
2551         return 0;
2552 out:
2553         spin_unlock_irq(&cm_id_priv->lock);
2554         cm_deref_id(cm_id_priv);
2555         return -EINVAL;
2556 }
2557
2558 static void cm_format_lap(struct cm_lap_msg *lap_msg,
2559                           struct cm_id_private *cm_id_priv,
2560                           struct ib_sa_path_rec *alternate_path,
2561                           const void *private_data,
2562                           u8 private_data_len)
2563 {
2564         cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
2565                           cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
2566         lap_msg->local_comm_id = cm_id_priv->id.local_id;
2567         lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
2568         cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
2569         /* todo: need remote CM response timeout */
2570         cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
2571         lap_msg->alt_local_lid = alternate_path->slid;
2572         lap_msg->alt_remote_lid = alternate_path->dlid;
2573         lap_msg->alt_local_gid = alternate_path->sgid;
2574         lap_msg->alt_remote_gid = alternate_path->dgid;
2575         cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
2576         cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
2577         lap_msg->alt_hop_limit = alternate_path->hop_limit;
2578         cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
2579         cm_lap_set_sl(lap_msg, alternate_path->sl);
2580         cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
2581         cm_lap_set_local_ack_timeout(lap_msg,
2582                 cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
2583                                alternate_path->packet_life_time));
2584
2585         if (private_data && private_data_len)
2586                 memcpy(lap_msg->private_data, private_data, private_data_len);
2587 }
2588
2589 int ib_send_cm_lap(struct ib_cm_id *cm_id,
2590                    struct ib_sa_path_rec *alternate_path,
2591                    const void *private_data,
2592                    u8 private_data_len)
2593 {
2594         struct cm_id_private *cm_id_priv;
2595         struct ib_mad_send_buf *msg;
2596         unsigned long flags;
2597         int ret;
2598
2599         if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
2600                 return -EINVAL;
2601
2602         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2603         spin_lock_irqsave(&cm_id_priv->lock, flags);
2604         if (cm_id->state != IB_CM_ESTABLISHED ||
2605             (cm_id->lap_state != IB_CM_LAP_UNINIT &&
2606              cm_id->lap_state != IB_CM_LAP_IDLE)) {
2607                 ret = -EINVAL;
2608                 goto out;
2609         }
2610
2611         ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
2612         if (ret)
2613                 goto out;
2614         cm_id_priv->alt_av.timeout =
2615                         cm_ack_timeout(cm_id_priv->target_ack_delay,
2616                                        cm_id_priv->alt_av.timeout - 1);
2617
2618         ret = cm_alloc_msg(cm_id_priv, &msg);
2619         if (ret)
2620                 goto out;
2621
2622         cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
2623                       alternate_path, private_data, private_data_len);
2624         msg->timeout_ms = cm_id_priv->timeout_ms;
2625         msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
2626
2627         ret = ib_post_send_mad(msg, NULL);
2628         if (ret) {
2629                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2630                 cm_free_msg(msg);
2631                 return ret;
2632         }
2633
2634         cm_id->lap_state = IB_CM_LAP_SENT;
2635         cm_id_priv->msg = msg;
2636
2637 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2638         return ret;
2639 }
2640 EXPORT_SYMBOL(ib_send_cm_lap);
2641
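/*
 * Build the alternate path record from a received LAP, swapping the
 * local and remote fields to reflect the receiver's point of view.
 */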
2642 static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
2643                                     struct ib_sa_path_rec *path,
2644                                     struct cm_lap_msg *lap_msg)
2645 {
2646         memset(path, 0, sizeof *path);
2647         path->dgid = lap_msg->alt_local_gid;
2648         path->sgid = lap_msg->alt_remote_gid;
2649         path->dlid = lap_msg->alt_local_lid;
2650         path->slid = lap_msg->alt_remote_lid;
2651         path->flow_label = cm_lap_get_flow_label(lap_msg);
2652         path->hop_limit = lap_msg->alt_hop_limit;
2653         path->traffic_class = cm_lap_get_traffic_class(lap_msg);
2654         path->reversible = 1;
2655         path->pkey = cm_id_priv->pkey;
2656         path->sl = cm_lap_get_sl(lap_msg);
2657         path->mtu_selector = IB_SA_EQ;
2658         path->mtu = cm_id_priv->path_mtu;
2659         path->rate_selector = IB_SA_EQ;
2660         path->rate = cm_lap_get_packet_rate(lap_msg);
2661         path->packet_life_time_selector = IB_SA_EQ;
2662         path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
2663         path->packet_life_time -= (path->packet_life_time > 0);
2664 }
2665
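/*
 * Handle a LAP: record the proposed alternate path, re-send the MRA for
 * duplicates, and report the event so the consumer can accept or
 * reject the path with an APR.
 */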
2666 static int cm_lap_handler(struct cm_work *work)
2667 {
2668         struct cm_id_private *cm_id_priv;
2669         struct cm_lap_msg *lap_msg;
2670         struct ib_cm_lap_event_param *param;
2671         struct ib_mad_send_buf *msg = NULL;
2672         int ret;
2673
2674         /* todo: verify LAP request and send reject APR if invalid. */
2675         lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
2676         cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
2677                                    lap_msg->local_comm_id);
2678         if (!cm_id_priv)
2679                 return -EINVAL;
2680
2681         param = &work->cm_event.param.lap_rcvd;
2682         param->alternate_path = &work->path[0];
2683         cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
2684         work->cm_event.private_data = &lap_msg->private_data;
2685
2686         spin_lock_irq(&cm_id_priv->lock);
2687         if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
2688                 goto unlock;
2689
2690         switch (cm_id_priv->id.lap_state) {
2691         case IB_CM_LAP_UNINIT:
2692         case IB_CM_LAP_IDLE:
2693                 break;
2694         case IB_CM_MRA_LAP_SENT:
2695                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2696                                 counter[CM_LAP_COUNTER]);
2697                 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
2698                         goto unlock;
2699
2700                 cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
2701                               CM_MSG_RESPONSE_OTHER,
2702                               cm_id_priv->service_timeout,
2703                               cm_id_priv->private_data,
2704                               cm_id_priv->private_data_len);
2705                 spin_unlock_irq(&cm_id_priv->lock);
2706
2707                 if (ib_post_send_mad(msg, NULL))
2708                         cm_free_msg(msg);
2709                 goto deref;
2710         case IB_CM_LAP_RCVD:
2711                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2712                                 counter[CM_LAP_COUNTER]);
2713                 goto unlock;
2714         default:
2715                 goto unlock;
2716         }
2717
2718         cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
2719         cm_id_priv->tid = lap_msg->hdr.tid;
2720         cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2721                                 work->mad_recv_wc->recv_buf.grh,
2722                                 &cm_id_priv->av);
2723         cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
2724         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2725         if (!ret)
2726                 list_add_tail(&work->list, &cm_id_priv->work_list);
2727         spin_unlock_irq(&cm_id_priv->lock);
2728
2729         if (ret)
2730                 cm_process_work(cm_id_priv, work);
2731         else
2732                 cm_deref_id(cm_id_priv);
2733         return 0;
2734
2735 unlock: spin_unlock_irq(&cm_id_priv->lock);
2736 deref:  cm_deref_id(cm_id_priv);
2737         return -EINVAL;
2738 }
2739
2740 static void cm_format_apr(struct cm_apr_msg *apr_msg,
2741                           struct cm_id_private *cm_id_priv,
2742                           enum ib_cm_apr_status status,
2743                           void *info,
2744                           u8 info_length,
2745                           const void *private_data,
2746                           u8 private_data_len)
2747 {
2748         cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
2749         apr_msg->local_comm_id = cm_id_priv->id.local_id;
2750         apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
2751         apr_msg->ap_status = (u8) status;
2752
2753         if (info && info_length) {
2754                 apr_msg->info_length = info_length;
2755                 memcpy(apr_msg->info, info, info_length);
2756         }
2757
2758         if (private_data && private_data_len)
2759                 memcpy(apr_msg->private_data, private_data, private_data_len);
2760 }
2761
2762 int ib_send_cm_apr(struct ib_cm_id *cm_id,
2763                    enum ib_cm_apr_status status,
2764                    void *info,
2765                    u8 info_length,
2766                    const void *private_data,
2767                    u8 private_data_len)
2768 {
2769         struct cm_id_private *cm_id_priv;
2770         struct ib_mad_send_buf *msg;
2771         unsigned long flags;
2772         int ret;
2773
2774         if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
2775             (info && info_length > IB_CM_APR_INFO_LENGTH))
2776                 return -EINVAL;
2777
2778         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2779         spin_lock_irqsave(&cm_id_priv->lock, flags);
2780         if (cm_id->state != IB_CM_ESTABLISHED ||
2781             (cm_id->lap_state != IB_CM_LAP_RCVD &&
2782              cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
2783                 ret = -EINVAL;
2784                 goto out;
2785         }
2786
2787         ret = cm_alloc_msg(cm_id_priv, &msg);
2788         if (ret)
2789                 goto out;
2790
2791         cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
2792                       info, info_length, private_data, private_data_len);
2793         ret = ib_post_send_mad(msg, NULL);
2794         if (ret) {
2795                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2796                 cm_free_msg(msg);
2797                 return ret;
2798         }
2799
2800         cm_id->lap_state = IB_CM_LAP_IDLE;
2801 out:    spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2802         return ret;
2803 }
2804 EXPORT_SYMBOL(ib_send_cm_apr);
2805
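     /*
      * Handle a received APR (alternate path response).  The reply is
      * only accepted while a LAP is outstanding (LAP_SENT or
      * MRA_LAP_RCVD); a match cancels the retransmitting LAP MAD before
      * the event is handed to the consumer.
      */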
2806 static int cm_apr_handler(struct cm_work *work)
2807 {
2808         struct cm_id_private *cm_id_priv;
2809         struct cm_apr_msg *apr_msg;
2810         int ret;
2811
2812         apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
2813         cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
2814                                    apr_msg->local_comm_id);
2815         if (!cm_id_priv)
2816                 return -EINVAL; /* Unmatched reply. */
2817
2818         work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
2819         work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
2820         work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
2821         work->cm_event.private_data = &apr_msg->private_data;
2822
2823         spin_lock_irq(&cm_id_priv->lock);
2824         if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
2825             (cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
2826              cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
2827                 spin_unlock_irq(&cm_id_priv->lock);
2828                 goto out;
2829         }
2830         cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
2831         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
2832         cm_id_priv->msg = NULL;
2833
2834         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2835         if (!ret)
2836                 list_add_tail(&work->list, &cm_id_priv->work_list);
2837         spin_unlock_irq(&cm_id_priv->lock);
2838
2839         if (ret)
2840                 cm_process_work(cm_id_priv, work);
2841         else
2842                 cm_deref_id(cm_id_priv);
2843         return 0;
2844 out:
2845         cm_deref_id(cm_id_priv);
2846         return -EINVAL;
2847 }
2848
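     /*
      * Runs when a connection's timewait period expires.  The cm_id is
      * moved back to IB_CM_IDLE only if it is still in TIMEWAIT for the
      * same remote QPN; otherwise the id has been reused and the stale
      * work item is dropped.
      */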
2849 static int cm_timewait_handler(struct cm_work *work)
2850 {
2851         struct cm_timewait_info *timewait_info;
2852         struct cm_id_private *cm_id_priv;
2853         int ret;
2854
2855         timewait_info = (struct cm_timewait_info *)work;
2856         spin_lock_irq(&cm.lock);
2857         list_del(&timewait_info->list);
2858         spin_unlock_irq(&cm.lock);
2859
2860         cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
2861                                    timewait_info->work.remote_id);
2862         if (!cm_id_priv)
2863                 return -EINVAL;
2864
2865         spin_lock_irq(&cm_id_priv->lock);
2866         if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
2867             cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
2868                 spin_unlock_irq(&cm_id_priv->lock);
2869                 goto out;
2870         }
2871         cm_id_priv->id.state = IB_CM_IDLE;
2872         ret = atomic_inc_and_test(&cm_id_priv->work_count);
2873         if (!ret)
2874                 list_add_tail(&work->list, &cm_id_priv->work_list);
2875         spin_unlock_irq(&cm_id_priv->lock);
2876
2877         if (ret)
2878                 cm_process_work(cm_id_priv, work);
2879         else
2880                 cm_deref_id(cm_id_priv);
2881         return 0;
2882 out:
2883         cm_deref_id(cm_id_priv);
2884         return -EINVAL;
2885 }
2886
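     /*
      * SIDR (service ID resolution) exchanges a single REQ/REP pair to
      * resolve a service ID to a datagram QPN and Q_Key instead of
      * setting up a connection, so the messages below carry no QP
      * transition state.
      */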
2887 static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
2888                                struct cm_id_private *cm_id_priv,
2889                                struct ib_cm_sidr_req_param *param)
2890 {
2891         cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
2892                           cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
2893         sidr_req_msg->request_id = cm_id_priv->id.local_id;
2894         sidr_req_msg->pkey = param->path->pkey;
2895         sidr_req_msg->service_id = param->service_id;
2896
2897         if (param->private_data && param->private_data_len)
2898                 memcpy(sidr_req_msg->private_data, param->private_data,
2899                        param->private_data_len);
2900 }
2901
2902 int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
2903                         struct ib_cm_sidr_req_param *param)
2904 {
2905         struct cm_id_private *cm_id_priv;
2906         struct ib_mad_send_buf *msg;
2907         unsigned long flags;
2908         int ret;
2909
2910         if (!param->path || (param->private_data &&
2911              param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
2912                 return -EINVAL;
2913
2914         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2915         ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
2916         if (ret)
2917                 goto out;
2918
2919         cm_id->service_id = param->service_id;
2920         cm_id->service_mask = ~cpu_to_be64(0);
2921         cm_id_priv->timeout_ms = param->timeout_ms;
2922         cm_id_priv->max_cm_retries = param->max_cm_retries;
2923         ret = cm_alloc_msg(cm_id_priv, &msg);
2924         if (ret)
2925                 goto out;
2926
2927         cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
2928                            param);
2929         msg->timeout_ms = cm_id_priv->timeout_ms;
2930         msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
2931
2932         spin_lock_irqsave(&cm_id_priv->lock, flags);
2933         if (cm_id->state == IB_CM_IDLE)
2934                 ret = ib_post_send_mad(msg, NULL);
2935         else
2936                 ret = -EINVAL;
2937
2938         if (ret) {
2939                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2940                 cm_free_msg(msg);
2941                 goto out;
2942         }
2943         cm_id->state = IB_CM_SIDR_REQ_SENT;
2944         cm_id_priv->msg = msg;
2945         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
2946 out:
2947         return ret;
2948 }
2949 EXPORT_SYMBOL(ib_send_cm_sidr_req);
2950
2951 static void cm_format_sidr_req_event(struct cm_work *work,
2952                                      struct ib_cm_id *listen_id)
2953 {
2954         struct cm_sidr_req_msg *sidr_req_msg;
2955         struct ib_cm_sidr_req_event_param *param;
2956
2957         sidr_req_msg = (struct cm_sidr_req_msg *)
2958                                 work->mad_recv_wc->recv_buf.mad;
2959         param = &work->cm_event.param.sidr_req_rcvd;
2960         param->pkey = be16_to_cpu(sidr_req_msg->pkey);
2961         param->listen_id = listen_id;
2962         param->port = work->port->port_num;
2963         work->cm_event.private_data = &sidr_req_msg->private_data;
2964 }
2965
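     /*
      * Handle a received SIDR REQ: create a fresh cm_id for the request,
      * record the sender's SLID in the dgid field purely as a key for
      * duplicate detection, then match the service ID against the listen
      * table.  An unmatched request is rejected with IB_SIDR_UNSUPPORTED.
      */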
2966 static int cm_sidr_req_handler(struct cm_work *work)
2967 {
2968         struct ib_cm_id *cm_id;
2969         struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
2970         struct cm_sidr_req_msg *sidr_req_msg;
2971         struct ib_wc *wc;
2972
2973         cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
2974         if (IS_ERR(cm_id))
2975                 return PTR_ERR(cm_id);
2976         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
2977
2978         /* Record SGID/SLID and request ID for lookup. */
2979         sidr_req_msg = (struct cm_sidr_req_msg *)
2980                                 work->mad_recv_wc->recv_buf.mad;
2981         wc = work->mad_recv_wc->wc;
2982         cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
2983         cm_id_priv->av.dgid.global.interface_id = 0;
2984         cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
2985                                 work->mad_recv_wc->recv_buf.grh,
2986                                 &cm_id_priv->av);
2987         cm_id_priv->id.remote_id = sidr_req_msg->request_id;
2988         cm_id_priv->tid = sidr_req_msg->hdr.tid;
2989         atomic_inc(&cm_id_priv->work_count);
2990
2991         spin_lock_irq(&cm.lock);
2992         cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
2993         if (cur_cm_id_priv) {
2994                 spin_unlock_irq(&cm.lock);
2995                 atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
2996                                 counter[CM_SIDR_REQ_COUNTER]);
2997                 goto out; /* Duplicate message. */
2998         }
2999         cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
3000         cur_cm_id_priv = cm_find_listen(cm_id->device,
3001                                         sidr_req_msg->service_id,
3002                                         sidr_req_msg->private_data);
3003         if (!cur_cm_id_priv) {
3004                 spin_unlock_irq(&cm.lock);
3005                 cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
3006                 goto out; /* No match. */
3007         }
3008         atomic_inc(&cur_cm_id_priv->refcount);
3009         atomic_inc(&cm_id_priv->refcount);
3010         spin_unlock_irq(&cm.lock);
3011
3012         cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
3013         cm_id_priv->id.context = cur_cm_id_priv->id.context;
3014         cm_id_priv->id.service_id = sidr_req_msg->service_id;
3015         cm_id_priv->id.service_mask = ~cpu_to_be64(0);
3016
3017         cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
3018         cm_process_work(cm_id_priv, work);
3019         cm_deref_id(cur_cm_id_priv);
3020         return 0;
3021 out:
3022         ib_destroy_cm_id(&cm_id_priv->id);
3023         return -EINVAL;
3024 }
3025
3026 static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
3027                                struct cm_id_private *cm_id_priv,
3028                                struct ib_cm_sidr_rep_param *param)
3029 {
3030         cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
3031                           cm_id_priv->tid);
3032         sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
3033         sidr_rep_msg->status = param->status;
3034         cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));
3035         sidr_rep_msg->service_id = cm_id_priv->id.service_id;
3036         sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
3037
3038         if (param->info && param->info_length)
3039                 memcpy(sidr_rep_msg->info, param->info, param->info_length);
3040
3041         if (param->private_data && param->private_data_len)
3042                 memcpy(sidr_rep_msg->private_data, param->private_data,
3043                        param->private_data_len);
3044 }
3045
3046 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
3047                         struct ib_cm_sidr_rep_param *param)
3048 {
3049         struct cm_id_private *cm_id_priv;
3050         struct ib_mad_send_buf *msg;
3051         unsigned long flags;
3052         int ret;
3053
3054         if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
3055             (param->private_data &&
3056              param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
3057                 return -EINVAL;
3058
3059         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3060         spin_lock_irqsave(&cm_id_priv->lock, flags);
3061         if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
3062                 ret = -EINVAL;
3063                 goto error;
3064         }
3065
3066         ret = cm_alloc_msg(cm_id_priv, &msg);
3067         if (ret)
3068                 goto error;
3069
3070         cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
3071                            param);
3072         ret = ib_post_send_mad(msg, NULL);
3073         if (ret) {
3074                 spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3075                 cm_free_msg(msg);
3076                 return ret;
3077         }
3078         cm_id->state = IB_CM_IDLE;
3079         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3080
3081         spin_lock_irqsave(&cm.lock, flags);
3082         rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
3083         spin_unlock_irqrestore(&cm.lock, flags);
3084         return 0;
3085
3086 error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3087         return ret;
3088 }
3089 EXPORT_SYMBOL(ib_send_cm_sidr_rep);
3090
3091 static void cm_format_sidr_rep_event(struct cm_work *work)
3092 {
3093         struct cm_sidr_rep_msg *sidr_rep_msg;
3094         struct ib_cm_sidr_rep_event_param *param;
3095
3096         sidr_rep_msg = (struct cm_sidr_rep_msg *)
3097                                 work->mad_recv_wc->recv_buf.mad;
3098         param = &work->cm_event.param.sidr_rep_rcvd;
3099         param->status = sidr_rep_msg->status;
3100         param->qkey = be32_to_cpu(sidr_rep_msg->qkey);
3101         param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg));
3102         param->info = &sidr_rep_msg->info;
3103         param->info_len = sidr_rep_msg->info_length;
3104         work->cm_event.private_data = &sidr_rep_msg->private_data;
3105 }
3106
3107 static int cm_sidr_rep_handler(struct cm_work *work)
3108 {
3109         struct cm_sidr_rep_msg *sidr_rep_msg;
3110         struct cm_id_private *cm_id_priv;
3111
3112         sidr_rep_msg = (struct cm_sidr_rep_msg *)
3113                                 work->mad_recv_wc->recv_buf.mad;
3114         cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
3115         if (!cm_id_priv)
3116                 return -EINVAL; /* Unmatched reply. */
3117
3118         spin_lock_irq(&cm_id_priv->lock);
3119         if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
3120                 spin_unlock_irq(&cm_id_priv->lock);
3121                 goto out;
3122         }
3123         cm_id_priv->id.state = IB_CM_IDLE;
3124         ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
3125         spin_unlock_irq(&cm_id_priv->lock);
3126
3127         cm_format_sidr_rep_event(work);
3128         cm_process_work(cm_id_priv, work);
3129         return 0;
3130 out:
3131         cm_deref_id(cm_id_priv);
3132         return -EINVAL;
3133 }
3134
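     /*
      * A send completed in error.  If the MAD is still the cm_id's
      * active message and the connection state has not moved on, the
      * send state is translated into the matching *_ERROR event and the
      * cm_id is reset; anything else is a stale send and is discarded.
      */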
3135 static void cm_process_send_error(struct ib_mad_send_buf *msg,
3136                                   enum ib_wc_status wc_status)
3137 {
3138         struct cm_id_private *cm_id_priv;
3139         struct ib_cm_event cm_event;
3140         enum ib_cm_state state;
3141         int ret;
3142
3143         memset(&cm_event, 0, sizeof cm_event);
3144         cm_id_priv = msg->context[0];
3145
3146         /* Discard old sends or ones without a response. */
3147         spin_lock_irq(&cm_id_priv->lock);
3148         state = (enum ib_cm_state) (unsigned long) msg->context[1];
3149         if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
3150                 goto discard;
3151
3152         switch (state) {
3153         case IB_CM_REQ_SENT:
3154         case IB_CM_MRA_REQ_RCVD:
3155                 cm_reset_to_idle(cm_id_priv);
3156                 cm_event.event = IB_CM_REQ_ERROR;
3157                 break;
3158         case IB_CM_REP_SENT:
3159         case IB_CM_MRA_REP_RCVD:
3160                 cm_reset_to_idle(cm_id_priv);
3161                 cm_event.event = IB_CM_REP_ERROR;
3162                 break;
3163         case IB_CM_DREQ_SENT:
3164                 cm_enter_timewait(cm_id_priv);
3165                 cm_event.event = IB_CM_DREQ_ERROR;
3166                 break;
3167         case IB_CM_SIDR_REQ_SENT:
3168                 cm_id_priv->id.state = IB_CM_IDLE;
3169                 cm_event.event = IB_CM_SIDR_REQ_ERROR;
3170                 break;
3171         default:
3172                 goto discard;
3173         }
3174         spin_unlock_irq(&cm_id_priv->lock);
3175         cm_event.param.send_status = wc_status;
3176
3177         /* No other events can occur on the cm_id at this point. */
3178         ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
3179         cm_free_msg(msg);
3180         if (ret)
3181                 ib_destroy_cm_id(&cm_id_priv->id);
3182         return;
3183 discard:
3184         spin_unlock_irq(&cm_id_priv->lock);
3185         cm_free_msg(msg);
3186 }
3187
3188 static void cm_send_handler(struct ib_mad_agent *mad_agent,
3189                             struct ib_mad_send_wc *mad_send_wc)
3190 {
3191         struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
3192         struct cm_port *port;
3193         u16 attr_index;
3194
3195         port = mad_agent->context;
3196         attr_index = be16_to_cpu(((struct ib_mad_hdr *)
3197                                   msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
3198
3199         /*
3200          * If the send was in response to a received message (context[0] is not
3201          * set to a cm_id), and is not a REJ, then it is a send that was
3202          * manually retried.
3203          */
3204         if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
3205                 msg->retries = 1;
3206
3207         atomic_long_add(1 + msg->retries,
3208                         &port->counter_group[CM_XMIT].counter[attr_index]);
3209         if (msg->retries)
3210                 atomic_long_add(msg->retries,
3211                                 &port->counter_group[CM_XMIT_RETRIES].
3212                                 counter[attr_index]);
3213
3214         switch (mad_send_wc->status) {
3215         case IB_WC_SUCCESS:
3216         case IB_WC_WR_FLUSH_ERR:
3217                 cm_free_msg(msg);
3218                 break;
3219         default:
3220                 if (msg->context[0] && msg->context[1])
3221                         cm_process_send_error(msg, mad_send_wc->status);
3222                 else
3223                         cm_free_msg(msg);
3224                 break;
3225         }
3226 }
3227
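     /*
      * Workqueue entry point: dispatch each queued cm_work item to the
      * handler for its event type.  Handlers that consume the work
      * return 0; on any error the work is freed here instead.
      */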
3228 static void cm_work_handler(struct work_struct *_work)
3229 {
3230         struct cm_work *work = container_of(_work, struct cm_work, work.work);
3231         int ret;
3232
3233         switch (work->cm_event.event) {
3234         case IB_CM_REQ_RECEIVED:
3235                 ret = cm_req_handler(work);
3236                 break;
3237         case IB_CM_MRA_RECEIVED:
3238                 ret = cm_mra_handler(work);
3239                 break;
3240         case IB_CM_REJ_RECEIVED:
3241                 ret = cm_rej_handler(work);
3242                 break;
3243         case IB_CM_REP_RECEIVED:
3244                 ret = cm_rep_handler(work);
3245                 break;
3246         case IB_CM_RTU_RECEIVED:
3247                 ret = cm_rtu_handler(work);
3248                 break;
3249         case IB_CM_USER_ESTABLISHED:
3250                 ret = cm_establish_handler(work);
3251                 break;
3252         case IB_CM_DREQ_RECEIVED:
3253                 ret = cm_dreq_handler(work);
3254                 break;
3255         case IB_CM_DREP_RECEIVED:
3256                 ret = cm_drep_handler(work);
3257                 break;
3258         case IB_CM_SIDR_REQ_RECEIVED:
3259                 ret = cm_sidr_req_handler(work);
3260                 break;
3261         case IB_CM_SIDR_REP_RECEIVED:
3262                 ret = cm_sidr_rep_handler(work);
3263                 break;
3264         case IB_CM_LAP_RECEIVED:
3265                 ret = cm_lap_handler(work);
3266                 break;
3267         case IB_CM_APR_RECEIVED:
3268                 ret = cm_apr_handler(work);
3269                 break;
3270         case IB_CM_TIMEWAIT_EXIT:
3271                 ret = cm_timewait_handler(work);
3272                 break;
3273         default:
3274                 ret = -EINVAL;
3275                 break;
3276         }
3277         if (ret)
3278                 cm_free_work(work);
3279 }
3280
3281 static int cm_establish(struct ib_cm_id *cm_id)
3282 {
3283         struct cm_id_private *cm_id_priv;
3284         struct cm_work *work;
3285         unsigned long flags;
3286         int ret = 0;
3287
3288         work = kmalloc(sizeof *work, GFP_ATOMIC);
3289         if (!work)
3290                 return -ENOMEM;
3291
3292         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3293         spin_lock_irqsave(&cm_id_priv->lock, flags);
3294         switch (cm_id->state) {
3296         case IB_CM_REP_SENT:
3297         case IB_CM_MRA_REP_RCVD:
3298                 cm_id->state = IB_CM_ESTABLISHED;
3299                 break;
3300         case IB_CM_ESTABLISHED:
3301                 ret = -EISCONN;
3302                 break;
3303         default:
3304                 ret = -EINVAL;
3305                 break;
3306         }
3307         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3308
3309         if (ret) {
3310                 kfree(work);
3311                 goto out;
3312         }
3313
3314         /*
3315          * The CM worker thread may try to destroy the cm_id before it
3316          * can execute this work item.  To prevent potential deadlock,
3317          * we need to find the cm_id once we're in the context of the
3318          * worker thread, rather than holding a reference on it.
3319          */
3320         INIT_DELAYED_WORK(&work->work, cm_work_handler);
3321         work->local_id = cm_id->local_id;
3322         work->remote_id = cm_id->remote_id;
3323         work->mad_recv_wc = NULL;
3324         work->cm_event.event = IB_CM_USER_ESTABLISHED;
3325         queue_delayed_work(cm.wq, &work->work, 0);
3326 out:
3327         return ret;
3328 }
3329
3330 static int cm_migrate(struct ib_cm_id *cm_id)
3331 {
3332         struct cm_id_private *cm_id_priv;
3333         unsigned long flags;
3334         int ret = 0;
3335
3336         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3337         spin_lock_irqsave(&cm_id_priv->lock, flags);
3338         if (cm_id->state == IB_CM_ESTABLISHED &&
3339             (cm_id->lap_state == IB_CM_LAP_UNINIT ||
3340              cm_id->lap_state == IB_CM_LAP_IDLE)) {
3341                 cm_id->lap_state = IB_CM_LAP_IDLE;
3342                 cm_id_priv->av = cm_id_priv->alt_av;
3343         } else
3344                 ret = -EINVAL;
3345         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3346
3347         return ret;
3348 }
3349
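     /*
      * Consumers call this from their QP event handler: IB_EVENT_COMM_EST
      * tells the CM that data arrived before the RTU did, and
      * IB_EVENT_PATH_MIG that the hardware has completed migration to
      * the alternate path.
      */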
3350 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
3351 {
3352         int ret;
3353
3354         switch (event) {
3355         case IB_EVENT_COMM_EST:
3356                 ret = cm_establish(cm_id);
3357                 break;
3358         case IB_EVENT_PATH_MIG:
3359                 ret = cm_migrate(cm_id);
3360                 break;
3361         default:
3362                 ret = -EINVAL;
3363         }
3364         return ret;
3365 }
3366 EXPORT_SYMBOL(ib_cm_notify);
3367
3368 static void cm_recv_handler(struct ib_mad_agent *mad_agent,
3369                             struct ib_mad_recv_wc *mad_recv_wc)
3370 {
3371         struct cm_port *port = mad_agent->context;
3372         struct cm_work *work;
3373         enum ib_cm_event_type event;
3374         u16 attr_id;
3375         int paths = 0;
3376
3377         switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
3378         case CM_REQ_ATTR_ID:
3379                 paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
3380                                                     alt_local_lid != 0);
3381                 event = IB_CM_REQ_RECEIVED;
3382                 break;
3383         case CM_MRA_ATTR_ID:
3384                 event = IB_CM_MRA_RECEIVED;
3385                 break;
3386         case CM_REJ_ATTR_ID:
3387                 event = IB_CM_REJ_RECEIVED;
3388                 break;
3389         case CM_REP_ATTR_ID:
3390                 event = IB_CM_REP_RECEIVED;
3391                 break;
3392         case CM_RTU_ATTR_ID:
3393                 event = IB_CM_RTU_RECEIVED;
3394                 break;
3395         case CM_DREQ_ATTR_ID:
3396                 event = IB_CM_DREQ_RECEIVED;
3397                 break;
3398         case CM_DREP_ATTR_ID:
3399                 event = IB_CM_DREP_RECEIVED;
3400                 break;
3401         case CM_SIDR_REQ_ATTR_ID:
3402                 event = IB_CM_SIDR_REQ_RECEIVED;
3403                 break;
3404         case CM_SIDR_REP_ATTR_ID:
3405                 event = IB_CM_SIDR_REP_RECEIVED;
3406                 break;
3407         case CM_LAP_ATTR_ID:
3408                 paths = 1;
3409                 event = IB_CM_LAP_RECEIVED;
3410                 break;
3411         case CM_APR_ATTR_ID:
3412                 event = IB_CM_APR_RECEIVED;
3413                 break;
3414         default:
3415                 ib_free_recv_mad(mad_recv_wc);
3416                 return;
3417         }
3418
3419         attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
3420         atomic_long_inc(&port->counter_group[CM_RECV].
3421                         counter[attr_id - CM_ATTR_ID_OFFSET]);
3422
3423         work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
3424                        GFP_KERNEL);
3425         if (!work) {
3426                 ib_free_recv_mad(mad_recv_wc);
3427                 return;
3428         }
3429
3430         INIT_DELAYED_WORK(&work->work, cm_work_handler);
3431         work->cm_event.event = event;
3432         work->mad_recv_wc = mad_recv_wc;
3433         work->port = port;
3434         queue_delayed_work(cm.wq, &work->work, 0);
3435 }
3436
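     /*
      * The three helpers below fill in the QP attributes for the INIT,
      * RTR, and RTS transitions from the connection state held in the
      * cm_id, so consumers never need to decode CM messages themselves.
      */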
3437 static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
3438                                 struct ib_qp_attr *qp_attr,
3439                                 int *qp_attr_mask)
3440 {
3441         unsigned long flags;
3442         int ret;
3443
3444         spin_lock_irqsave(&cm_id_priv->lock, flags);
3445         switch (cm_id_priv->id.state) {
3446         case IB_CM_REQ_SENT:
3447         case IB_CM_MRA_REQ_RCVD:
3448         case IB_CM_REQ_RCVD:
3449         case IB_CM_MRA_REQ_SENT:
3450         case IB_CM_REP_RCVD:
3451         case IB_CM_MRA_REP_SENT:
3452         case IB_CM_REP_SENT:
3453         case IB_CM_MRA_REP_RCVD:
3454         case IB_CM_ESTABLISHED:
3455                 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
3456                                 IB_QP_PKEY_INDEX | IB_QP_PORT;
3457                 qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
3458                 if (cm_id_priv->responder_resources)
3459                         qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
3460                                                     IB_ACCESS_REMOTE_ATOMIC;
3461                 qp_attr->pkey_index = cm_id_priv->av.pkey_index;
3462                 qp_attr->port_num = cm_id_priv->av.port->port_num;
3463                 ret = 0;
3464                 break;
3465         default:
3466                 ret = -EINVAL;
3467                 break;
3468         }
3469         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3470         return ret;
3471 }
3472
3473 static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
3474                                struct ib_qp_attr *qp_attr,
3475                                int *qp_attr_mask)
3476 {
3477         unsigned long flags;
3478         int ret;
3479
3480         spin_lock_irqsave(&cm_id_priv->lock, flags);
3481         switch (cm_id_priv->id.state) {
3482         case IB_CM_REQ_RCVD:
3483         case IB_CM_MRA_REQ_SENT:
3484         case IB_CM_REP_RCVD:
3485         case IB_CM_MRA_REP_SENT:
3486         case IB_CM_REP_SENT:
3487         case IB_CM_MRA_REP_RCVD:
3488         case IB_CM_ESTABLISHED:
3489                 *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
3490                                 IB_QP_DEST_QPN | IB_QP_RQ_PSN;
3491                 qp_attr->ah_attr = cm_id_priv->av.ah_attr;
3492                 qp_attr->path_mtu = cm_id_priv->path_mtu;
3493                 qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
3494                 qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
3495                 if (cm_id_priv->qp_type == IB_QPT_RC) {
3496                         *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
3497                                          IB_QP_MIN_RNR_TIMER;
3498                         qp_attr->max_dest_rd_atomic =
3499                                         cm_id_priv->responder_resources;
3500                         qp_attr->min_rnr_timer = 0;
3501                 }
3502                 if (cm_id_priv->alt_av.ah_attr.dlid) {
3503                         *qp_attr_mask |= IB_QP_ALT_PATH;
3504                         qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3505                         qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3506                         qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3507                         qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3508                 }
3509                 ret = 0;
3510                 break;
3511         default:
3512                 ret = -EINVAL;
3513                 break;
3514         }
3515         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3516         return ret;
3517 }
3518
3519 static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
3520                                struct ib_qp_attr *qp_attr,
3521                                int *qp_attr_mask)
3522 {
3523         unsigned long flags;
3524         int ret;
3525
3526         spin_lock_irqsave(&cm_id_priv->lock, flags);
3527         switch (cm_id_priv->id.state) {
3528         /* Allow transition to RTS before sending REP */
3529         case IB_CM_REQ_RCVD:
3530         case IB_CM_MRA_REQ_SENT:
3531
3532         case IB_CM_REP_RCVD:
3533         case IB_CM_MRA_REP_SENT:
3534         case IB_CM_REP_SENT:
3535         case IB_CM_MRA_REP_RCVD:
3536         case IB_CM_ESTABLISHED:
3537                 if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
3538                         *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
3539                         qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
3540                         if (cm_id_priv->qp_type == IB_QPT_RC) {
3541                                 *qp_attr_mask |= IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
3542                                                  IB_QP_RNR_RETRY |
3543                                                  IB_QP_MAX_QP_RD_ATOMIC;
3544                                 qp_attr->timeout = cm_id_priv->av.timeout;
3545                                 qp_attr->retry_cnt = cm_id_priv->retry_count;
3546                                 qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
3547                                 qp_attr->max_rd_atomic =
3548                                         cm_id_priv->initiator_depth;
3549                         }
3550                         if (cm_id_priv->alt_av.ah_attr.dlid) {
3551                                 *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
3552                                 qp_attr->path_mig_state = IB_MIG_REARM;
3553                         }
3554                 } else {
3555                         *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
3556                         qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
3557                         qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
3558                         qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
3559                         qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
3560                         qp_attr->path_mig_state = IB_MIG_REARM;
3561                 }
3562                 ret = 0;
3563                 break;
3564         default:
3565                 ret = -EINVAL;
3566                 break;
3567         }
3568         spin_unlock_irqrestore(&cm_id_priv->lock, flags);
3569         return ret;
3570 }
3571
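     /*
      * A minimal consumer sketch (assumes an RC QP "qp" tied to an
      * active "cm_id"; error handling omitted):
      *
      *     struct ib_qp_attr qp_attr;
      *     int qp_attr_mask;
      *
      *     qp_attr.qp_state = IB_QPS_INIT;
      *     ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
      *     ib_modify_qp(qp, &qp_attr, qp_attr_mask);
      *     ... repeat with IB_QPS_RTR, then IB_QPS_RTS ...
      */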
3572 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
3573                        struct ib_qp_attr *qp_attr,
3574                        int *qp_attr_mask)
3575 {
3576         struct cm_id_private *cm_id_priv;
3577         int ret;
3578
3579         cm_id_priv = container_of(cm_id, struct cm_id_private, id);
3580         switch (qp_attr->qp_state) {
3581         case IB_QPS_INIT:
3582                 ret = cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
3583                 break;
3584         case IB_QPS_RTR:
3585                 ret = cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
3586                 break;
3587         case IB_QPS_RTS:
3588                 ret = cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
3589                 break;
3590         default:
3591                 ret = -EINVAL;
3592                 break;
3593         }
3594         return ret;
3595 }
3596 EXPORT_SYMBOL(ib_cm_init_qp_attr);
3597
3598 static void cm_get_ack_delay(struct cm_device *cm_dev)
3599 {
3600         struct ib_device_attr attr;
3601
3602         if (ib_query_device(cm_dev->ib_device, &attr))
3603                 cm_dev->ack_delay = 0; /* acks will rely on packet life time */
3604         else
3605                 cm_dev->ack_delay = attr.local_ca_ack_delay;
3606 }
3607
3608 static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
3609                                char *buf)
3610 {
3611         struct cm_counter_group *group;
3612         struct cm_counter_attribute *cm_attr;
3613
3614         group = container_of(obj, struct cm_counter_group, obj);
3615         cm_attr = container_of(attr, struct cm_counter_attribute, attr);
3616
3617         return sprintf(buf, "%ld\n",
3618                        atomic_long_read(&group->counter[cm_attr->index]));
3619 }
3620
3621 static const struct sysfs_ops cm_counter_ops = {
3622         .show = cm_show_counter
3623 };
3624
3625 static struct kobj_type cm_counter_obj_type = {
3626         .sysfs_ops = &cm_counter_ops,
3627         .default_attrs = cm_counter_default_attrs
3628 };
3629
3630 static void cm_release_port_obj(struct kobject *obj)
3631 {
3632         struct cm_port *cm_port;
3633
3634         cm_port = container_of(obj, struct cm_port, port_obj);
3635         kfree(cm_port);
3636 }
3637
3638 static struct kobj_type cm_port_obj_type = {
3639         .release = cm_release_port_obj
3640 };
3641
3642 static char *cm_devnode(struct device *dev, mode_t *mode)
3643 {
3644         if (mode)
3645                 *mode = 0666;
3646         return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
3647 }
3648
3649 struct class cm_class = {
3650         .owner   = THIS_MODULE,
3651         .name    = "infiniband_cm",
3652         .devnode = cm_devnode,
3653 };
3654 EXPORT_SYMBOL(cm_class);
3655
3656 static int cm_create_port_fs(struct cm_port *port)
3657 {
3658         int i, ret;
3659
3660         ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
3661                                    &port->cm_dev->device->kobj,
3662                                    "%d", port->port_num);
3663         if (ret) {
3664                 kfree(port);
3665                 return ret;
3666         }
3667
3668         for (i = 0; i < CM_COUNTER_GROUPS; i++) {
3669                 ret = kobject_init_and_add(&port->counter_group[i].obj,
3670                                            &cm_counter_obj_type,
3671                                            &port->port_obj,
3672                                            "%s", counter_group_names[i]);
3673                 if (ret)
3674                         goto error;
3675         }
3676
3677         return 0;
3678
3679 error:
3680         while (i--)
3681                 kobject_put(&port->counter_group[i].obj);
3682         kobject_put(&port->port_obj);
3683         return ret;
3685 }
3686
3687 static void cm_remove_port_fs(struct cm_port *port)
3688 {
3689         int i;
3690
3691         for (i = 0; i < CM_COUNTER_GROUPS; i++)
3692                 kobject_put(&port->counter_group[i].obj);
3693
3694         kobject_put(&port->port_obj);
3695 }
3696
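     /*
      * Per-device setup: register a GSI MAD agent on every physical
      * port, advertise CM support in the port capability mask, and
      * expose the counter groups through sysfs.  The error paths unwind
      * in reverse, port by port.
      */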
3697 static void cm_add_one(struct ib_device *ib_device)
3698 {
3699         struct cm_device *cm_dev;
3700         struct cm_port *port;
3701         struct ib_mad_reg_req reg_req = {
3702                 .mgmt_class = IB_MGMT_CLASS_CM,
3703                 .mgmt_class_version = IB_CM_CLASS_VERSION
3704         };
3705         struct ib_port_modify port_modify = {
3706                 .set_port_cap_mask = IB_PORT_CM_SUP
3707         };
3708         unsigned long flags;
3709         int ret;
3710         u8 i;
3711
3712         if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB)
3713                 return;
3714
3715         cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
3716                          ib_device->phys_port_cnt, GFP_KERNEL);
3717         if (!cm_dev)
3718                 return;
3719
3720         cm_dev->ib_device = ib_device;
3721         cm_get_ack_delay(cm_dev);
3722
3723         cm_dev->device = device_create(&cm_class, &ib_device->dev,
3724                                        MKDEV(0, 0), NULL,
3725                                        "%s", ib_device->name);
3726         if (IS_ERR(cm_dev->device)) {
3727                 kfree(cm_dev);
3728                 return;
3729         }
3730
3731         set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
3732         for (i = 1; i <= ib_device->phys_port_cnt; i++) {
3733                 port = kzalloc(sizeof *port, GFP_KERNEL);
3734                 if (!port)
3735                         goto error1;
3736
3737                 cm_dev->port[i-1] = port;
3738                 port->cm_dev = cm_dev;
3739                 port->port_num = i;
3740
3741                 ret = cm_create_port_fs(port);
3742                 if (ret)
3743                         goto error1;
3744
3745                 port->mad_agent = ib_register_mad_agent(ib_device, i,
3746                                                         IB_QPT_GSI,
3747                                                         &reg_req,
3748                                                         0,
3749                                                         cm_send_handler,
3750                                                         cm_recv_handler,
3751                                                         port);
3752                 if (IS_ERR(port->mad_agent))
3753                         goto error2;
3754
3755                 ret = ib_modify_port(ib_device, i, 0, &port_modify);
3756                 if (ret)
3757                         goto error3;
3758         }
3759         ib_set_client_data(ib_device, &cm_client, cm_dev);
3760
3761         write_lock_irqsave(&cm.device_lock, flags);
3762         list_add_tail(&cm_dev->list, &cm.device_list);
3763         write_unlock_irqrestore(&cm.device_lock, flags);
3764         return;
3765
3766 error3:
3767         ib_unregister_mad_agent(port->mad_agent);
3768 error2:
3769         cm_remove_port_fs(port);
3770 error1:
3771         port_modify.set_port_cap_mask = 0;
3772         port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
3773         while (--i) {
3774                 port = cm_dev->port[i-1];
3775                 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
3776                 ib_unregister_mad_agent(port->mad_agent);
3777                 cm_remove_port_fs(port);
3778         }
3779         device_unregister(cm_dev->device);
3780         kfree(cm_dev);
3781 }
3782
3783 static void cm_remove_one(struct ib_device *ib_device)
3784 {
3785         struct cm_device *cm_dev;
3786         struct cm_port *port;
3787         struct ib_port_modify port_modify = {
3788                 .clr_port_cap_mask = IB_PORT_CM_SUP
3789         };
3790         unsigned long flags;
3791         int i;
3792
3793         cm_dev = ib_get_client_data(ib_device, &cm_client);
3794         if (!cm_dev)
3795                 return;
3796
3797         write_lock_irqsave(&cm.device_lock, flags);
3798         list_del(&cm_dev->list);
3799         write_unlock_irqrestore(&cm.device_lock, flags);
3800
3801         for (i = 1; i <= ib_device->phys_port_cnt; i++) {
3802                 port = cm_dev->port[i-1];
3803                 ib_modify_port(ib_device, port->port_num, 0, &port_modify);
3804                 ib_unregister_mad_agent(port->mad_agent);
3805                 flush_workqueue(cm.wq);
3806                 cm_remove_port_fs(port);
3807         }
3808         device_unregister(cm_dev->device);
3809         kfree(cm_dev);
3810 }
3811
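     /*
      * Init order matters: the device class must exist before the
      * client is registered, since cm_add_one() creates class devices
      * as soon as registration makes the client visible.
      */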
3812 static int __init ib_cm_init(void)
3813 {
3814         int ret;
3815
3816         memset(&cm, 0, sizeof cm);
3817         INIT_LIST_HEAD(&cm.device_list);
3818         rwlock_init(&cm.device_lock);
3819         spin_lock_init(&cm.lock);
3820         cm.listen_service_table = RB_ROOT;
3821         cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
3822         cm.remote_id_table = RB_ROOT;
3823         cm.remote_qp_table = RB_ROOT;
3824         cm.remote_sidr_table = RB_ROOT;
3825         idr_init(&cm.local_id_table);
3826         get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
3827         idr_pre_get(&cm.local_id_table, GFP_KERNEL);
3828         INIT_LIST_HEAD(&cm.timewait_list);
3829
3830         ret = class_register(&cm_class);
3831         if (ret)
3832                 return ret;
3833
3834         cm.wq = create_workqueue("ib_cm");
3835         if (!cm.wq) {
3836                 ret = -ENOMEM;
3837                 goto error1;
3838         }
3839
3840         ret = ib_register_client(&cm_client);
3841         if (ret)
3842                 goto error2;
3843
3844         return 0;
3845 error2:
3846         destroy_workqueue(cm.wq);
3847 error1:
3848         class_unregister(&cm_class);
3849         return ret;
3850 }
3851
3852 static void __exit ib_cm_cleanup(void)
3853 {
3854         struct cm_timewait_info *timewait_info, *tmp;
3855
3856         spin_lock_irq(&cm.lock);
3857         list_for_each_entry(timewait_info, &cm.timewait_list, list)
3858                 cancel_delayed_work(&timewait_info->work.work);
3859         spin_unlock_irq(&cm.lock);
3860
3861         ib_unregister_client(&cm_client);
3862         destroy_workqueue(cm.wq);
3863
3864         list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
3865                 list_del(&timewait_info->list);
3866                 kfree(timewait_info);
3867         }
3868
3869         class_unregister(&cm_class);
3870         idr_destroy(&cm.local_id_table);
3871 }
3872
3873 module_init(ib_cm_init);
3874 module_exit(ib_cm_cleanup);
3875