RDMA/cma: Add RDMA_CM_EVENT_TIMEWAIT_EXIT event
[pandora-kernel.git] drivers/infiniband/core/cma.c
/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
        .name   = "cma",
        .add    = cma_add_one,
        .remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);
static DEFINE_IDR(udp_ps);
static DEFINE_IDR(ipoib_ps);
static int next_port;

struct cma_device {
        struct list_head        list;
        struct ib_device        *device;
        struct completion       comp;
        atomic_t                refcount;
        struct list_head        id_list;
};

enum cma_state {
        CMA_IDLE,
        CMA_ADDR_QUERY,
        CMA_ADDR_RESOLVED,
        CMA_ROUTE_QUERY,
        CMA_ROUTE_RESOLVED,
        CMA_CONNECT,
        CMA_DISCONNECT,
        CMA_ADDR_BOUND,
        CMA_LISTEN,
        CMA_DEVICE_REMOVAL,
        CMA_DESTROYING
};

struct rdma_bind_list {
        struct idr              *ps;
        struct hlist_head       owners;
        unsigned short          port;
};

/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
        struct rdma_cm_id       id;

        struct rdma_bind_list   *bind_list;
        struct hlist_node       node;
        struct list_head        list; /* listen_any_list or cma_device.list */
        struct list_head        listen_list; /* per device listens */
        struct cma_device       *cma_dev;
        struct list_head        mc_list;

        int                     internal_id;
        enum cma_state          state;
        spinlock_t              lock;
        struct mutex            qp_mutex;

        struct completion       comp;
        atomic_t                refcount;
        struct mutex            handler_mutex;

        int                     backlog;
        int                     timeout_ms;
        struct ib_sa_query      *query;
        int                     query_id;
        union {
                struct ib_cm_id *ib;
                struct iw_cm_id *iw;
        } cm_id;

        u32                     seq_num;
        u32                     qkey;
        u32                     qp_num;
        u8                      srq;
        u8                      tos;
};

struct cma_multicast {
        struct rdma_id_private *id_priv;
        union {
                struct ib_sa_multicast *ib;
        } multicast;
        struct list_head        list;
        void                    *context;
        struct sockaddr         addr;
        u8                      pad[sizeof(struct sockaddr_in6) -
                                    sizeof(struct sockaddr)];
};

struct cma_work {
        struct work_struct      work;
        struct rdma_id_private  *id;
        enum cma_state          old_state;
        enum cma_state          new_state;
        struct rdma_cm_event    event;
};

struct cma_ndev_work {
        struct work_struct      work;
        struct rdma_id_private  *id;
        struct rdma_cm_event    event;
};

union cma_ip_addr {
        struct in6_addr ip6;
        struct {
                __be32 pad[3];
                __be32 addr;
        } ip4;
};

struct cma_hdr {
        u8 cma_version;
        u8 ip_version;  /* IP version: 7:4 */
        __be16 port;
        union cma_ip_addr src_addr;
        union cma_ip_addr dst_addr;
};

struct sdp_hh {
        u8 bsdh[16];
        u8 sdp_version; /* Major version: 7:4 */
        u8 ip_version;  /* IP version: 7:4 */
        u8 sdp_specific1[10];
        __be16 port;
        __be16 sdp_specific2;
        union cma_ip_addr src_addr;
        union cma_ip_addr dst_addr;
};

struct sdp_hah {
        u8 bsdh[16];
        u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2

static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        ret = (id_priv->state == comp);
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
                         enum cma_state comp, enum cma_state exch)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&id_priv->lock, flags);
        if ((ret = (id_priv->state == comp)))
                id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
                               enum cma_state exch)
{
        unsigned long flags;
        enum cma_state old;

        spin_lock_irqsave(&id_priv->lock, flags);
        old = id_priv->state;
        id_priv->state = exch;
        spin_unlock_irqrestore(&id_priv->lock, flags);
        return old;
}
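
/*
 * A minimal usage sketch for the three state helpers above, modeled on
 * rdma_listen() and rdma_destroy_id() further down.  Callers gate a
 * transition on the current state and roll it back on failure:
 *
 *	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
 *		return -EINVAL;
 *	...on error, undo the transition...
 *	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
 *
 * cma_exch() is the unconditional form used when tearing an id down.
 */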

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
        return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
        hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
        return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
        return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
        hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}
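
/*
 * Worked example of the version nibble packing above, assuming a
 * zero-initialized header: cma_set_ip_ver(hdr, 4) leaves
 * hdr->ip_version == 0x40, so cma_get_ip_ver(hdr) returns
 * 0x40 >> 4 == 4.  The low nibble (bits 3:0) is masked and preserved,
 * matching the "IP version: 7:4" field comments in the structs above.
 */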

static inline int cma_is_ud_ps(enum rdma_port_space ps)
{
        return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
}

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        atomic_inc(&cma_dev->refcount);
        id_priv->cma_dev = cma_dev;
        id_priv->id.device = cma_dev->device;
        list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
        if (atomic_dec_and_test(&cma_dev->refcount))
                complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
        list_del(&id_priv->list);
        cma_deref_dev(id_priv->cma_dev);
        id_priv->cma_dev = NULL;
}

static int cma_set_qkey(struct ib_device *device, u8 port_num,
                        enum rdma_port_space ps,
                        struct rdma_dev_addr *dev_addr, u32 *qkey)
{
        struct ib_sa_mcmember_rec rec;
        int ret = 0;

        switch (ps) {
        case RDMA_PS_UDP:
                *qkey = RDMA_UDP_QKEY;
                break;
        case RDMA_PS_IPOIB:
                ib_addr_get_mgid(dev_addr, &rec.mgid);
                ret = ib_sa_get_mcmember_rec(device, port_num, &rec.mgid, &rec);
                *qkey = be32_to_cpu(rec.qkey);
                break;
        default:
                break;
        }
        return ret;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        struct cma_device *cma_dev;
        union ib_gid gid;
        int ret = -ENODEV;

        switch (rdma_node_get_transport(dev_addr->dev_type)) {
        case RDMA_TRANSPORT_IB:
                ib_addr_get_sgid(dev_addr, &gid);
                break;
        case RDMA_TRANSPORT_IWARP:
                iw_addr_get_sgid(dev_addr, &gid);
                break;
        default:
                return -ENODEV;
        }

        list_for_each_entry(cma_dev, &dev_list, list) {
                ret = ib_find_cached_gid(cma_dev->device, &gid,
                                         &id_priv->id.port_num, NULL);
                if (!ret) {
                        ret = cma_set_qkey(cma_dev->device,
                                           id_priv->id.port_num,
                                           id_priv->id.ps, dev_addr,
                                           &id_priv->qkey);
                        if (!ret)
                                cma_attach_to_dev(id_priv, cma_dev);
                        break;
                }
        }
        return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
        if (atomic_dec_and_test(&id_priv->refcount))
                complete(&id_priv->comp);
}

static int cma_disable_callback(struct rdma_id_private *id_priv,
                              enum cma_state state)
{
        mutex_lock(&id_priv->handler_mutex);
        if (id_priv->state != state) {
                mutex_unlock(&id_priv->handler_mutex);
                return -EINVAL;
        }
        return 0;
}
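
/*
 * A hedged sketch of the serialization pattern cma_disable_callback()
 * enables (see the comment above struct rdma_id_private): on success
 * it returns with handler_mutex held, so every event handler that
 * enters through it must drop the mutex on all exit paths.  Roughly,
 * with my_handler standing in for cma_ib_handler() and friends:
 *
 *	static int my_handler(..., void *context)
 *	{
 *		struct rdma_id_private *id_priv = context;
 *
 *		if (cma_disable_callback(id_priv, CMA_CONNECT))
 *			return 0;	(stale event; mutex not held)
 *		...build and report the rdma_cm_event...
 *		mutex_unlock(&id_priv->handler_mutex);
 *		return ret;
 *	}
 */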

static int cma_has_cm_dev(struct rdma_id_private *id_priv)
{
        return (id_priv->id.device && id_priv->cm_id.ib);
}

struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
                                  void *context, enum rdma_port_space ps)
{
        struct rdma_id_private *id_priv;

        id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
        if (!id_priv)
                return ERR_PTR(-ENOMEM);

        id_priv->state = CMA_IDLE;
        id_priv->id.context = context;
        id_priv->id.event_handler = event_handler;
        id_priv->id.ps = ps;
        spin_lock_init(&id_priv->lock);
        mutex_init(&id_priv->qp_mutex);
        init_completion(&id_priv->comp);
        atomic_set(&id_priv->refcount, 1);
        mutex_init(&id_priv->handler_mutex);
        INIT_LIST_HEAD(&id_priv->listen_list);
        INIT_LIST_HEAD(&id_priv->mc_list);
        get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

        return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
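
/*
 * A hedged usage sketch for rdma_create_id(); my_handler and my_ctx
 * are hypothetical caller names, not symbols from this file:
 *
 *	static int my_handler(struct rdma_cm_id *id,
 *			      struct rdma_cm_event *event);
 *
 *	struct rdma_cm_id *id;
 *
 *	id = rdma_create_id(my_handler, my_ctx, RDMA_PS_TCP);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *
 * The new id starts in CMA_IDLE with no device attached; a device is
 * bound later by rdma_bind_addr() or address resolution.
 */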

static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
        if (ret)
                return ret;

        qp_attr.qp_state = IB_QPS_RTS;
        qp_attr.sq_psn = 0;
        ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

        return ret;
}

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                return ret;

        return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
                   struct ib_qp_init_attr *qp_init_attr)
{
        struct rdma_id_private *id_priv;
        struct ib_qp *qp;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id->device != pd->device)
                return -EINVAL;

        qp = ib_create_qp(pd, qp_init_attr);
        if (IS_ERR(qp))
                return PTR_ERR(qp);

        if (cma_is_ud_ps(id_priv->id.ps))
                ret = cma_init_ud_qp(id_priv, qp);
        else
                ret = cma_init_conn_qp(id_priv, qp);
        if (ret)
                goto err;

        id->qp = qp;
        id_priv->qp_num = qp->qp_num;
        id_priv->srq = (qp->srq != NULL);
        return 0;
err:
        ib_destroy_qp(qp);
        return ret;
}
EXPORT_SYMBOL(rdma_create_qp);
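
/*
 * A hedged usage sketch for rdma_create_qp(), assuming the route is
 * already resolved (so id->device is set) and that my_pd and my_cq
 * were created earlier with ib_alloc_pd()/ib_create_cq() (hypothetical
 * names):
 *
 *	struct ib_qp_init_attr init_attr;
 *
 *	memset(&init_attr, 0, sizeof init_attr);
 *	init_attr.qp_type = IB_QPT_RC;
 *	init_attr.send_cq = my_cq;
 *	init_attr.recv_cq = my_cq;
 *	init_attr.cap.max_send_wr = 16;
 *	init_attr.cap.max_recv_wr = 16;
 *	init_attr.cap.max_send_sge = 1;
 *	init_attr.cap.max_recv_sge = 1;
 *	ret = rdma_create_qp(id, my_pd, &init_attr);
 *
 * On success the QP is left in INIT for connected services or in RTS
 * for UD services, per cma_init_conn_qp()/cma_init_ud_qp() above.
 */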

void rdma_destroy_qp(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;

        id_priv = container_of(id, struct rdma_id_private, id);
        mutex_lock(&id_priv->qp_mutex);
        ib_destroy_qp(id_priv->id.qp);
        id_priv->id.qp = NULL;
        mutex_unlock(&id_priv->qp_mutex);
}
EXPORT_SYMBOL(rdma_destroy_qp);

static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
                             struct rdma_conn_param *conn_param)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        /* Need to update QP attributes from default values. */
        qp_attr.qp_state = IB_QPS_INIT;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
        if (ret)
                goto out;

        qp_attr.qp_state = IB_QPS_RTR;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        if (conn_param)
                qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
                             struct rdma_conn_param *conn_param)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        qp_attr.qp_state = IB_QPS_RTS;
        ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
        if (ret)
                goto out;

        if (conn_param)
                qp_attr.max_rd_atomic = conn_param->initiator_depth;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
{
        struct ib_qp_attr qp_attr;
        int ret;

        mutex_lock(&id_priv->qp_mutex);
        if (!id_priv->id.qp) {
                ret = 0;
                goto out;
        }

        qp_attr.qp_state = IB_QPS_ERR;
        ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
out:
        mutex_unlock(&id_priv->qp_mutex);
        return ret;
}

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
                               struct ib_qp_attr *qp_attr, int *qp_attr_mask)
{
        struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
        int ret;

        ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
                                  ib_addr_get_pkey(dev_addr),
                                  &qp_attr->pkey_index);
        if (ret)
                return ret;

        qp_attr->port_num = id_priv->id.port_num;
        *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

        if (cma_is_ud_ps(id_priv->id.ps)) {
                qp_attr->qkey = id_priv->qkey;
                *qp_attr_mask |= IB_QP_QKEY;
        } else {
                qp_attr->qp_access_flags = 0;
                *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
        }
        return 0;
}

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
                       int *qp_attr_mask)
{
        struct rdma_id_private *id_priv;
        int ret = 0;

        id_priv = container_of(id, struct rdma_id_private, id);
        switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
        case RDMA_TRANSPORT_IB:
                if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
                        ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
                else
                        ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
                                                 qp_attr_mask);
                if (qp_attr->qp_state == IB_QPS_RTR)
                        qp_attr->rq_psn = id_priv->seq_num;
                break;
        case RDMA_TRANSPORT_IWARP:
                if (!id_priv->cm_id.iw) {
                        qp_attr->qp_access_flags = 0;
                        *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
                } else
                        ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
                                                 qp_attr_mask);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);
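
/*
 * A minimal sketch of driving a caller-owned QP by hand with
 * rdma_init_qp_attr() instead of letting rdma_create_qp() manage it;
 * my_qp is hypothetical.  This mirrors what cma_modify_qp_rtr() above
 * does internally for each state:
 *
 *	qp_attr.qp_state = IB_QPS_RTR;
 *	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(my_qp, &qp_attr, qp_attr_mask);
 */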

static inline int cma_zero_addr(struct sockaddr *addr)
{
        struct in6_addr *ip6;

        if (addr->sa_family == AF_INET)
                return ipv4_is_zeronet(
                        ((struct sockaddr_in *)addr)->sin_addr.s_addr);
        else {
                ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
                return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
                        ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
        }
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
        return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
        return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline __be16 cma_port(struct sockaddr *addr)
{
        if (addr->sa_family == AF_INET)
                return ((struct sockaddr_in *) addr)->sin_port;
        else
                return ((struct sockaddr_in6 *) addr)->sin6_port;
}

static inline int cma_any_port(struct sockaddr *addr)
{
        return !cma_port(addr);
}

static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
                            u8 *ip_ver, __be16 *port,
                            union cma_ip_addr **src, union cma_ip_addr **dst)
{
        switch (ps) {
        case RDMA_PS_SDP:
                if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
                    SDP_MAJ_VERSION)
                        return -EINVAL;

                *ip_ver = sdp_get_ip_ver(hdr);
                *port   = ((struct sdp_hh *) hdr)->port;
                *src    = &((struct sdp_hh *) hdr)->src_addr;
                *dst    = &((struct sdp_hh *) hdr)->dst_addr;
                break;
        default:
                if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
                        return -EINVAL;

                *ip_ver = cma_get_ip_ver(hdr);
                *port   = ((struct cma_hdr *) hdr)->port;
                *src    = &((struct cma_hdr *) hdr)->src_addr;
                *dst    = &((struct cma_hdr *) hdr)->dst_addr;
                break;
        }

        if (*ip_ver != 4 && *ip_ver != 6)
                return -EINVAL;
        return 0;
}

static void cma_save_net_info(struct rdma_addr *addr,
                              struct rdma_addr *listen_addr,
                              u8 ip_ver, __be16 port,
                              union cma_ip_addr *src, union cma_ip_addr *dst)
{
        struct sockaddr_in *listen4, *ip4;
        struct sockaddr_in6 *listen6, *ip6;

        switch (ip_ver) {
        case 4:
                listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
                ip4 = (struct sockaddr_in *) &addr->src_addr;
                ip4->sin_family = listen4->sin_family;
                ip4->sin_addr.s_addr = dst->ip4.addr;
                ip4->sin_port = listen4->sin_port;

                ip4 = (struct sockaddr_in *) &addr->dst_addr;
                ip4->sin_family = listen4->sin_family;
                ip4->sin_addr.s_addr = src->ip4.addr;
                ip4->sin_port = port;
                break;
        case 6:
                listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
                ip6 = (struct sockaddr_in6 *) &addr->src_addr;
                ip6->sin6_family = listen6->sin6_family;
                ip6->sin6_addr = dst->ip6;
                ip6->sin6_port = listen6->sin6_port;

                ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
                ip6->sin6_family = listen6->sin6_family;
                ip6->sin6_addr = src->ip6;
                ip6->sin6_port = port;
                break;
        default:
                break;
        }
}

static inline int cma_user_data_offset(enum rdma_port_space ps)
{
        switch (ps) {
        case RDMA_PS_SDP:
                return 0;
        default:
                return sizeof(struct cma_hdr);
        }
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
        switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
        case RDMA_TRANSPORT_IB:
                if (id_priv->query)
                        ib_sa_cancel_query(id_priv->query_id, id_priv->query);
                break;
        default:
                break;
        }
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
        struct rdma_id_private *dev_id_priv;

        /*
         * Remove from listen_any_list to prevent added devices from spawning
         * additional listen requests.
         */
        mutex_lock(&lock);
        list_del(&id_priv->list);

        while (!list_empty(&id_priv->listen_list)) {
                dev_id_priv = list_entry(id_priv->listen_list.next,
                                         struct rdma_id_private, listen_list);
                /* sync with device removal to avoid duplicate destruction */
                list_del_init(&dev_id_priv->list);
                list_del(&dev_id_priv->listen_list);
                mutex_unlock(&lock);

                rdma_destroy_id(&dev_id_priv->id);
                mutex_lock(&lock);
        }
        mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
                                 enum cma_state state)
{
        switch (state) {
        case CMA_ADDR_QUERY:
                rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
                break;
        case CMA_ROUTE_QUERY:
                cma_cancel_route(id_priv);
                break;
        case CMA_LISTEN:
                if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
                    !id_priv->cma_dev)
                        cma_cancel_listens(id_priv);
                break;
        default:
                break;
        }
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
        struct rdma_bind_list *bind_list = id_priv->bind_list;

        if (!bind_list)
                return;

        mutex_lock(&lock);
        hlist_del(&id_priv->node);
        if (hlist_empty(&bind_list->owners)) {
                idr_remove(bind_list->ps, bind_list->port);
                kfree(bind_list);
        }
        mutex_unlock(&lock);
}

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
{
        struct cma_multicast *mc;

        while (!list_empty(&id_priv->mc_list)) {
                mc = container_of(id_priv->mc_list.next,
                                  struct cma_multicast, list);
                list_del(&mc->list);
                ib_sa_free_multicast(mc->multicast.ib);
                kfree(mc);
        }
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
        struct rdma_id_private *id_priv;
        enum cma_state state;

        id_priv = container_of(id, struct rdma_id_private, id);
        state = cma_exch(id_priv, CMA_DESTROYING);
        cma_cancel_operation(id_priv, state);

        mutex_lock(&lock);
        if (id_priv->cma_dev) {
                mutex_unlock(&lock);
                switch (rdma_node_get_transport(id->device->node_type)) {
                case RDMA_TRANSPORT_IB:
                        if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
                                ib_destroy_cm_id(id_priv->cm_id.ib);
                        break;
                case RDMA_TRANSPORT_IWARP:
                        if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
                                iw_destroy_cm_id(id_priv->cm_id.iw);
                        break;
                default:
                        break;
                }
                cma_leave_mc_groups(id_priv);
                mutex_lock(&lock);
                cma_detach_from_dev(id_priv);
        }
        mutex_unlock(&lock);

        cma_release_port(id_priv);
        cma_deref_id(id_priv);
        wait_for_completion(&id_priv->comp);

        if (id_priv->internal_id)
                cma_deref_id(id_priv->id.context);

        kfree(id_priv->id.route.path_rec);
        kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
        int ret;

        ret = cma_modify_qp_rtr(id_priv, NULL);
        if (ret)
                goto reject;

        ret = cma_modify_qp_rts(id_priv, NULL);
        if (ret)
                goto reject;

        ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
        if (ret)
                goto reject;

        return 0;
reject:
        cma_modify_qp_err(id_priv);
        ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
                       NULL, 0, NULL, 0);
        return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
        if (id_priv->id.ps == RDMA_PS_SDP &&
            sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
            SDP_MAJ_VERSION)
                return -EINVAL;

        return 0;
}

static void cma_set_rep_event_data(struct rdma_cm_event *event,
                                   struct ib_cm_rep_event_param *rep_data,
                                   void *private_data)
{
        event->param.conn.private_data = private_data;
        event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
        event->param.conn.responder_resources = rep_data->responder_resources;
        event->param.conn.initiator_depth = rep_data->initiator_depth;
        event->param.conn.flow_control = rep_data->flow_control;
        event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
        event->param.conn.srq = rep_data->srq;
        event->param.conn.qp_num = rep_data->remote_qpn;
}

static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv = cm_id->context;
        struct rdma_cm_event event;
        int ret = 0;

        if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
                cma_disable_callback(id_priv, CMA_CONNECT)) ||
            (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
                cma_disable_callback(id_priv, CMA_DISCONNECT)))
                return 0;

        memset(&event, 0, sizeof event);
        switch (ib_event->event) {
        case IB_CM_REQ_ERROR:
        case IB_CM_REP_ERROR:
                event.event = RDMA_CM_EVENT_UNREACHABLE;
                event.status = -ETIMEDOUT;
                break;
        case IB_CM_REP_RECEIVED:
                event.status = cma_verify_rep(id_priv, ib_event->private_data);
                if (event.status)
                        event.event = RDMA_CM_EVENT_CONNECT_ERROR;
                else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
                        event.status = cma_rep_recv(id_priv);
                        event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
                                                     RDMA_CM_EVENT_ESTABLISHED;
                } else
                        event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
                cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
                                       ib_event->private_data);
                break;
        case IB_CM_RTU_RECEIVED:
        case IB_CM_USER_ESTABLISHED:
                event.event = RDMA_CM_EVENT_ESTABLISHED;
                break;
        case IB_CM_DREQ_ERROR:
                event.status = -ETIMEDOUT; /* fall through */
        case IB_CM_DREQ_RECEIVED:
        case IB_CM_DREP_RECEIVED:
                if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
                        goto out;
                event.event = RDMA_CM_EVENT_DISCONNECTED;
                break;
        case IB_CM_TIMEWAIT_EXIT:
                event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
                break;
        case IB_CM_MRA_RECEIVED:
                /* ignore event */
                goto out;
        case IB_CM_REJ_RECEIVED:
                cma_modify_qp_err(id_priv);
                event.status = ib_event->param.rej_rcvd.reason;
                event.event = RDMA_CM_EVENT_REJECTED;
                event.param.conn.private_data = ib_event->private_data;
                event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
                break;
        default:
                printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
                       ib_event->event);
                goto out;
        }

        ret = id_priv->id.event_handler(&id_priv->id, &event);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.ib = NULL;
                cma_exch(id_priv, CMA_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }
out:
        mutex_unlock(&id_priv->handler_mutex);
        return ret;
}
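
/*
 * A hedged sketch of the consumer side of the RDMA_CM_EVENT_TIMEWAIT_EXIT
 * event generated above: it is reported only from the CMA_DISCONNECT
 * state and tells the user that the IB CM has left the timewait state,
 * so the QP can now be torn down and its QPN safely reused.  A user
 * event handler might plausibly do:
 *
 *	case RDMA_CM_EVENT_DISCONNECTED:
 *		...quiesce I/O, but keep the QP...
 *		break;
 *	case RDMA_CM_EVENT_TIMEWAIT_EXIT:
 *		rdma_destroy_qp(id);
 *		break;
 */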

static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
                                               struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
        struct rdma_route *rt;
        union cma_ip_addr *src, *dst;
        __be16 port;
        u8 ip_ver;
        int ret;

        if (cma_get_net_info(ib_event->private_data, listen_id->ps,
                             &ip_ver, &port, &src, &dst))
                goto err;

        id = rdma_create_id(listen_id->event_handler, listen_id->context,
                            listen_id->ps);
        if (IS_ERR(id))
                goto err;

        cma_save_net_info(&id->route.addr, &listen_id->route.addr,
                          ip_ver, port, src, dst);

        rt = &id->route;
        rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
        rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
                               GFP_KERNEL);
        if (!rt->path_rec)
                goto destroy_id;

        rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
        if (rt->num_paths == 2)
                rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

        ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
        ret = rdma_translate_ip(&id->route.addr.src_addr,
                                &id->route.addr.dev_addr);
        if (ret)
                goto destroy_id;

        id_priv = container_of(id, struct rdma_id_private, id);
        id_priv->state = CMA_CONNECT;
        return id_priv;

destroy_id:
        rdma_destroy_id(id);
err:
        return NULL;
}

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
                                              struct ib_cm_event *ib_event)
{
        struct rdma_id_private *id_priv;
        struct rdma_cm_id *id;
        union cma_ip_addr *src, *dst;
        __be16 port;
        u8 ip_ver;
        int ret;

        id = rdma_create_id(listen_id->event_handler, listen_id->context,
                            listen_id->ps);
        if (IS_ERR(id))
                return NULL;

        if (cma_get_net_info(ib_event->private_data, listen_id->ps,
                             &ip_ver, &port, &src, &dst))
                goto err;

        cma_save_net_info(&id->route.addr, &listen_id->route.addr,
                          ip_ver, port, src, dst);

        ret = rdma_translate_ip(&id->route.addr.src_addr,
                                &id->route.addr.dev_addr);
        if (ret)
                goto err;

        id_priv = container_of(id, struct rdma_id_private, id);
        id_priv->state = CMA_CONNECT;
        return id_priv;
err:
        rdma_destroy_id(id);
        return NULL;
}

static void cma_set_req_event_data(struct rdma_cm_event *event,
                                   struct ib_cm_req_event_param *req_data,
                                   void *private_data, int offset)
{
        event->param.conn.private_data = private_data + offset;
        event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
        event->param.conn.responder_resources = req_data->responder_resources;
        event->param.conn.initiator_depth = req_data->initiator_depth;
        event->param.conn.flow_control = req_data->flow_control;
        event->param.conn.retry_count = req_data->retry_count;
        event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
        event->param.conn.srq = req_data->srq;
        event->param.conn.qp_num = req_data->remote_qpn;
}

static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
        struct rdma_id_private *listen_id, *conn_id;
        struct rdma_cm_event event;
        int offset, ret;

        listen_id = cm_id->context;
        if (cma_disable_callback(listen_id, CMA_LISTEN))
                return -ECONNABORTED;

        memset(&event, 0, sizeof event);
        offset = cma_user_data_offset(listen_id->id.ps);
        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
        if (cma_is_ud_ps(listen_id->id.ps)) {
                conn_id = cma_new_udp_id(&listen_id->id, ib_event);
                event.param.ud.private_data = ib_event->private_data + offset;
                event.param.ud.private_data_len =
                                IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
        } else {
                conn_id = cma_new_conn_id(&listen_id->id, ib_event);
                cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
                                       ib_event->private_data, offset);
        }
        if (!conn_id) {
                ret = -ENOMEM;
                goto out;
        }

        mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
        mutex_lock(&lock);
        ret = cma_acquire_dev(conn_id);
        mutex_unlock(&lock);
        if (ret)
                goto release_conn_id;

        conn_id->cm_id.ib = cm_id;
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_ib_handler;

        ret = conn_id->id.event_handler(&conn_id->id, &event);
        if (!ret) {
                /*
                 * Acquire mutex to prevent user executing rdma_destroy_id()
                 * while we're accessing the cm_id.
                 */
                mutex_lock(&lock);
                if (cma_comp(conn_id, CMA_CONNECT) &&
                    !cma_is_ud_ps(conn_id->id.ps))
                        ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
                mutex_unlock(&lock);
                mutex_unlock(&conn_id->handler_mutex);
                goto out;
        }

        /* Destroy the CM ID by returning a non-zero value. */
        conn_id->cm_id.ib = NULL;

release_conn_id:
        cma_exch(conn_id, CMA_DESTROYING);
        mutex_unlock(&conn_id->handler_mutex);
        rdma_destroy_id(&conn_id->id);

out:
        mutex_unlock(&listen_id->handler_mutex);
        return ret;
}

static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
        return cpu_to_be64(((u64)ps << 16) + be16_to_cpu(cma_port(addr)));
}
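
/*
 * Worked example for cma_get_service_id(), assuming the rdma_cm.h port
 * space values of this era (RDMA_PS_TCP == 0x0106): for a TCP-space id
 * on port 5000 (0x1388), the service ID is
 * cpu_to_be64(((u64) 0x0106 << 16) + 0x1388), i.e. 0x01061388 in host
 * order before the byte swap.
 */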

static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
                                 struct ib_cm_compare_data *compare)
{
        struct cma_hdr *cma_data, *cma_mask;
        struct sdp_hh *sdp_data, *sdp_mask;
        __be32 ip4_addr;
        struct in6_addr ip6_addr;

        memset(compare, 0, sizeof *compare);
        cma_data = (void *) compare->data;
        cma_mask = (void *) compare->mask;
        sdp_data = (void *) compare->data;
        sdp_mask = (void *) compare->mask;

        switch (addr->sa_family) {
        case AF_INET:
                ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
                if (ps == RDMA_PS_SDP) {
                        sdp_set_ip_ver(sdp_data, 4);
                        sdp_set_ip_ver(sdp_mask, 0xF);
                        sdp_data->dst_addr.ip4.addr = ip4_addr;
                        sdp_mask->dst_addr.ip4.addr = htonl(~0);
                } else {
                        cma_set_ip_ver(cma_data, 4);
                        cma_set_ip_ver(cma_mask, 0xF);
                        cma_data->dst_addr.ip4.addr = ip4_addr;
                        cma_mask->dst_addr.ip4.addr = htonl(~0);
                }
                break;
        case AF_INET6:
                ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
                if (ps == RDMA_PS_SDP) {
                        sdp_set_ip_ver(sdp_data, 6);
                        sdp_set_ip_ver(sdp_mask, 0xF);
                        sdp_data->dst_addr.ip6 = ip6_addr;
                        memset(&sdp_mask->dst_addr.ip6, 0xFF,
                               sizeof sdp_mask->dst_addr.ip6);
                } else {
                        cma_set_ip_ver(cma_data, 6);
                        cma_set_ip_ver(cma_mask, 0xF);
                        cma_data->dst_addr.ip6 = ip6_addr;
                        memset(&cma_mask->dst_addr.ip6, 0xFF,
                               sizeof cma_mask->dst_addr.ip6);
                }
                break;
        default:
                break;
        }
}

static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
        struct rdma_id_private *id_priv = iw_id->context;
        struct rdma_cm_event event;
        struct sockaddr_in *sin;
        int ret = 0;

        if (cma_disable_callback(id_priv, CMA_CONNECT))
                return 0;

        memset(&event, 0, sizeof event);
        switch (iw_event->event) {
        case IW_CM_EVENT_CLOSE:
                event.event = RDMA_CM_EVENT_DISCONNECTED;
                break;
        case IW_CM_EVENT_CONNECT_REPLY:
                sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
                *sin = iw_event->local_addr;
                sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
                *sin = iw_event->remote_addr;
                switch (iw_event->status) {
                case 0:
                        event.event = RDMA_CM_EVENT_ESTABLISHED;
                        break;
                case -ECONNRESET:
                case -ECONNREFUSED:
                        event.event = RDMA_CM_EVENT_REJECTED;
                        break;
                case -ETIMEDOUT:
                        event.event = RDMA_CM_EVENT_UNREACHABLE;
                        break;
                default:
                        event.event = RDMA_CM_EVENT_CONNECT_ERROR;
                        break;
                }
                break;
        case IW_CM_EVENT_ESTABLISHED:
                event.event = RDMA_CM_EVENT_ESTABLISHED;
                break;
        default:
                BUG_ON(1);
        }

        event.status = iw_event->status;
        event.param.conn.private_data = iw_event->private_data;
        event.param.conn.private_data_len = iw_event->private_data_len;
        ret = id_priv->id.event_handler(&id_priv->id, &event);
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.iw = NULL;
                cma_exch(id_priv, CMA_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return ret;
        }

        mutex_unlock(&id_priv->handler_mutex);
        return ret;
}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
                               struct iw_cm_event *iw_event)
{
        struct rdma_cm_id *new_cm_id;
        struct rdma_id_private *listen_id, *conn_id;
        struct sockaddr_in *sin;
        struct net_device *dev = NULL;
        struct rdma_cm_event event;
        int ret;
        struct ib_device_attr attr;

        listen_id = cm_id->context;
        if (cma_disable_callback(listen_id, CMA_LISTEN))
                return -ECONNABORTED;

        /* Create a new RDMA id for the new IW CM ID */
        new_cm_id = rdma_create_id(listen_id->id.event_handler,
                                   listen_id->id.context,
                                   RDMA_PS_TCP);
        if (IS_ERR(new_cm_id)) {
                ret = -ENOMEM;
                goto out;
        }
        conn_id = container_of(new_cm_id, struct rdma_id_private, id);
        mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
        conn_id->state = CMA_CONNECT;

        dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
        if (!dev) {
                ret = -EADDRNOTAVAIL;
                mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
                goto out;
        }
        ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
        if (ret) {
                mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
                goto out;
        }

        mutex_lock(&lock);
        ret = cma_acquire_dev(conn_id);
        mutex_unlock(&lock);
        if (ret) {
                mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
                goto out;
        }

        conn_id->cm_id.iw = cm_id;
        cm_id->context = conn_id;
        cm_id->cm_handler = cma_iw_handler;

        sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
        *sin = iw_event->local_addr;
        sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
        *sin = iw_event->remote_addr;

        ret = ib_query_device(conn_id->id.device, &attr);
        if (ret) {
                mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(new_cm_id);
                goto out;
        }

        memset(&event, 0, sizeof event);
        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
        event.param.conn.private_data = iw_event->private_data;
        event.param.conn.private_data_len = iw_event->private_data_len;
        event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
        event.param.conn.responder_resources = attr.max_qp_rd_atom;
        ret = conn_id->id.event_handler(&conn_id->id, &event);
        if (ret) {
                /* User wants to destroy the CM ID */
                conn_id->cm_id.iw = NULL;
                cma_exch(conn_id, CMA_DESTROYING);
                mutex_unlock(&conn_id->handler_mutex);
                rdma_destroy_id(&conn_id->id);
                goto out;
        }

        mutex_unlock(&conn_id->handler_mutex);

out:
        if (dev)
                dev_put(dev);
        mutex_unlock(&listen_id->handler_mutex);
        return ret;
}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{
        struct ib_cm_compare_data compare_data;
        struct sockaddr *addr;
        __be64 svc_id;
        int ret;

        id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
                                            id_priv);
        if (IS_ERR(id_priv->cm_id.ib))
                return PTR_ERR(id_priv->cm_id.ib);

        addr = &id_priv->id.route.addr.src_addr;
        svc_id = cma_get_service_id(id_priv->id.ps, addr);
        if (cma_any_addr(addr))
                ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
        else {
                cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
                ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
        }

        if (ret) {
                ib_destroy_cm_id(id_priv->cm_id.ib);
                id_priv->cm_id.ib = NULL;
        }

        return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
        int ret;
        struct sockaddr_in *sin;

        id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
                                            iw_conn_req_handler,
                                            id_priv);
        if (IS_ERR(id_priv->cm_id.iw))
                return PTR_ERR(id_priv->cm_id.iw);

        sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
        id_priv->cm_id.iw->local_addr = *sin;

        ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

        if (ret) {
                iw_destroy_cm_id(id_priv->cm_id.iw);
                id_priv->cm_id.iw = NULL;
        }

        return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
                              struct rdma_cm_event *event)
{
        struct rdma_id_private *id_priv = id->context;

        id->context = id_priv->id.context;
        id->event_handler = id_priv->id.event_handler;
        return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
{
        struct rdma_id_private *dev_id_priv;
        struct rdma_cm_id *id;
        int ret;

        id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
        if (IS_ERR(id))
                return;

        dev_id_priv = container_of(id, struct rdma_id_private, id);

        dev_id_priv->state = CMA_ADDR_BOUND;
        memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
               ip_addr_size(&id_priv->id.route.addr.src_addr));

        cma_attach_to_dev(dev_id_priv, cma_dev);
        list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
        atomic_inc(&id_priv->refcount);
        dev_id_priv->internal_id = 1;

        ret = rdma_listen(id, id_priv->backlog);
        if (ret)
                printk(KERN_WARNING "RDMA CMA: cma_listen_on_dev, error %d, "
                       "listening on device %s\n", ret, cma_dev->device->name);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
        struct cma_device *cma_dev;

        mutex_lock(&lock);
        list_add_tail(&id_priv->list, &listen_any_list);
        list_for_each_entry(cma_dev, &dev_list, list)
                cma_listen_on_dev(id_priv, cma_dev);
        mutex_unlock(&lock);
}

static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
        struct sockaddr_in addr_in;

        memset(&addr_in, 0, sizeof addr_in);
        addr_in.sin_family = af;
        return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
        struct rdma_id_private *id_priv;
        int ret;

        id_priv = container_of(id, struct rdma_id_private, id);
        if (id_priv->state == CMA_IDLE) {
                ret = cma_bind_any(id, AF_INET);
                if (ret)
                        return ret;
        }

        if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
                return -EINVAL;

        id_priv->backlog = backlog;
        if (id->device) {
                switch (rdma_node_get_transport(id->device->node_type)) {
                case RDMA_TRANSPORT_IB:
                        ret = cma_ib_listen(id_priv);
                        if (ret)
                                goto err;
                        break;
                case RDMA_TRANSPORT_IWARP:
                        ret = cma_iw_listen(id_priv, backlog);
                        if (ret)
                                goto err;
                        break;
                default:
                        ret = -ENOSYS;
                        goto err;
                }
        } else
                cma_listen_on_all(id_priv);

        return 0;
err:
        id_priv->backlog = 0;
        cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
        return ret;
}
EXPORT_SYMBOL(rdma_listen);
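
/*
 * A hedged passive-side usage sketch combining rdma_bind_addr() and
 * rdma_listen(); my_id is a hypothetical id created as shown earlier:
 *
 *	struct sockaddr_in sin;
 *
 *	memset(&sin, 0, sizeof sin);
 *	sin.sin_family = AF_INET;
 *	sin.sin_port = htons(5000);
 *	ret = rdma_bind_addr(my_id, (struct sockaddr *) &sin);
 *	if (!ret)
 *		ret = rdma_listen(my_id, 10);
 *
 * Incoming connections are then delivered to my_id's event handler as
 * RDMA_CM_EVENT_CONNECT_REQUEST events carrying a new child id.
 */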

void rdma_set_service_type(struct rdma_cm_id *id, int tos)
{
        struct rdma_id_private *id_priv;

        id_priv = container_of(id, struct rdma_id_private, id);
        id_priv->tos = (u8) tos;
}
EXPORT_SYMBOL(rdma_set_service_type);
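
/*
 * Example of the effect of rdma_set_service_type(): for an AF_INET
 * source address on IB, the stored tos becomes path_rec.qos_class in
 * cma_query_ib_route() below, so rdma_set_service_type(id, 0x10)
 * requests QoS class 0x10 for the subsequent route query.
 */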

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
                              void *context)
{
        struct cma_work *work = context;
        struct rdma_route *route;

        route = &work->id->id.route;

        if (!status) {
                route->num_paths = 1;
                *route->path_rec = *path_rec;
        } else {
                work->old_state = CMA_ROUTE_QUERY;
                work->new_state = CMA_ADDR_RESOLVED;
                work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
                work->event.status = status;
        }

        queue_work(cma_wq, &work->work);
}

static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
                              struct cma_work *work)
{
        struct rdma_addr *addr = &id_priv->id.route.addr;
        struct ib_sa_path_rec path_rec;
        ib_sa_comp_mask comp_mask;
        struct sockaddr_in6 *sin6;

        memset(&path_rec, 0, sizeof path_rec);
        ib_addr_get_sgid(&addr->dev_addr, &path_rec.sgid);
        ib_addr_get_dgid(&addr->dev_addr, &path_rec.dgid);
        path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(&addr->dev_addr));
        path_rec.numb_path = 1;
        path_rec.reversible = 1;
        path_rec.service_id = cma_get_service_id(id_priv->id.ps, &addr->dst_addr);

        comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
                    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
                    IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;

        if (addr->src_addr.sa_family == AF_INET) {
                path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
                comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
        } else {
                sin6 = (struct sockaddr_in6 *) &addr->src_addr;
                path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
                comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
        }

        id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
                                               id_priv->id.port_num, &path_rec,
                                               comp_mask, timeout_ms,
                                               GFP_KERNEL, cma_query_handler,
                                               work, &id_priv->query);

        return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(struct work_struct *_work)
{
        struct cma_work *work = container_of(_work, struct cma_work, work);
        struct rdma_id_private *id_priv = work->id;
        int destroy = 0;

        mutex_lock(&id_priv->handler_mutex);
        if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
                goto out;

        if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
                cma_exch(id_priv, CMA_DESTROYING);
                destroy = 1;
        }
out:
        mutex_unlock(&id_priv->handler_mutex);
        cma_deref_id(id_priv);
        if (destroy)
                rdma_destroy_id(&id_priv->id);
        kfree(work);
}

static void cma_ndev_work_handler(struct work_struct *_work)
{
        struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
        struct rdma_id_private *id_priv = work->id;
1616         int destroy = 0;
1617
1618         mutex_lock(&id_priv->handler_mutex);
1619         if (id_priv->state == CMA_DESTROYING ||
1620             id_priv->state == CMA_DEVICE_REMOVAL)
1621                 goto out;
1622
1623         if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
1624                 cma_exch(id_priv, CMA_DESTROYING);
1625                 destroy = 1;
1626         }
1627
1628 out:
1629         mutex_unlock(&id_priv->handler_mutex);
1630         cma_deref_id(id_priv);
1631         if (destroy)
1632                 rdma_destroy_id(&id_priv->id);
1633         kfree(work);
1634 }
1635
1636 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
1637 {
1638         struct rdma_route *route = &id_priv->id.route;
1639         struct cma_work *work;
1640         int ret;
1641
1642         work = kzalloc(sizeof *work, GFP_KERNEL);
1643         if (!work)
1644                 return -ENOMEM;
1645
1646         work->id = id_priv;
1647         INIT_WORK(&work->work, cma_work_handler);
1648         work->old_state = CMA_ROUTE_QUERY;
1649         work->new_state = CMA_ROUTE_RESOLVED;
1650         work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1651
1652         route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
1653         if (!route->path_rec) {
1654                 ret = -ENOMEM;
1655                 goto err1;
1656         }
1657
1658         ret = cma_query_ib_route(id_priv, timeout_ms, work);
1659         if (ret)
1660                 goto err2;
1661
1662         return 0;
1663 err2:
1664         kfree(route->path_rec);
1665         route->path_rec = NULL;
1666 err1:
1667         kfree(work);
1668         return ret;
1669 }
1670
1671 int rdma_set_ib_paths(struct rdma_cm_id *id,
1672                       struct ib_sa_path_rec *path_rec, int num_paths)
1673 {
1674         struct rdma_id_private *id_priv;
1675         int ret;
1676
1677         id_priv = container_of(id, struct rdma_id_private, id);
1678         if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
1679                 return -EINVAL;
1680
1681         id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
1682         if (!id->route.path_rec) {
1683                 ret = -ENOMEM;
1684                 goto err;
1685         }
1686
1687         memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
1688         return 0;
1689 err:
1690         cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
1691         return ret;
1692 }
1693 EXPORT_SYMBOL(rdma_set_ib_paths);
1694
1695 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
1696 {
1697         struct cma_work *work;
1698
1699         work = kzalloc(sizeof *work, GFP_KERNEL);
1700         if (!work)
1701                 return -ENOMEM;
1702
1703         work->id = id_priv;
1704         INIT_WORK(&work->work, cma_work_handler);
1705         work->old_state = CMA_ROUTE_QUERY;
1706         work->new_state = CMA_ROUTE_RESOLVED;
1707         work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
1708         queue_work(cma_wq, &work->work);
1709         return 0;
1710 }
1711
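/*
 * No SA path query is needed for iWARP: routing happens in the IP stack when
 * the TCP connection is set up, so cma_resolve_iw_route() above only queues a
 * work item that replays RDMA_CM_EVENT_ROUTE_RESOLVED through
 * cma_work_handler(), keeping state transitions and callback context uniform
 * with the IB case.
 */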
1712 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
1713 {
1714         struct rdma_id_private *id_priv;
1715         int ret;
1716
1717         id_priv = container_of(id, struct rdma_id_private, id);
1718         if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
1719                 return -EINVAL;
1720
1721         atomic_inc(&id_priv->refcount);
1722         switch (rdma_node_get_transport(id->device->node_type)) {
1723         case RDMA_TRANSPORT_IB:
1724                 ret = cma_resolve_ib_route(id_priv, timeout_ms);
1725                 break;
1726         case RDMA_TRANSPORT_IWARP:
1727                 ret = cma_resolve_iw_route(id_priv, timeout_ms);
1728                 break;
1729         default:
1730                 ret = -ENOSYS;
1731                 break;
1732         }
1733         if (ret)
1734                 goto err;
1735
1736         return 0;
1737 err:
1738         cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
1739         cma_deref_id(id_priv);
1740         return ret;
1741 }
1742 EXPORT_SYMBOL(rdma_resolve_route);
1743
1744 static int cma_bind_loopback(struct rdma_id_private *id_priv)
1745 {
1746         struct cma_device *cma_dev;
1747         struct ib_port_attr port_attr;
1748         union ib_gid gid;
1749         u16 pkey;
1750         int ret;
1751         u8 p;
1752
1753         mutex_lock(&lock);
1754         if (list_empty(&dev_list)) {
1755                 ret = -ENODEV;
1756                 goto out;
1757         }
1758         list_for_each_entry(cma_dev, &dev_list, list)
1759                 for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
1760                         if (!ib_query_port(cma_dev->device, p, &port_attr) &&
1761                             port_attr.state == IB_PORT_ACTIVE)
1762                                 goto port_found;
1763
1764         p = 1;
1765         cma_dev = list_entry(dev_list.next, struct cma_device, list);
1766
1767 port_found:
1768         ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
1769         if (ret)
1770                 goto out;
1771
1772         ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
1773         if (ret)
1774                 goto out;
1775
1776         ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1777         ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
1778         id_priv->id.port_num = p;
1779         cma_attach_to_dev(id_priv, cma_dev);
1780 out:
1781         mutex_unlock(&lock);
1782         return ret;
1783 }
1784
1785 static void addr_handler(int status, struct sockaddr *src_addr,
1786                          struct rdma_dev_addr *dev_addr, void *context)
1787 {
1788         struct rdma_id_private *id_priv = context;
1789         struct rdma_cm_event event;
1790
1791         memset(&event, 0, sizeof event);
1792         mutex_lock(&id_priv->handler_mutex);
1793
1794         /*
1795          * Hold the global lock to keep rdma_destroy_id() from releasing the
1796          * device while we are still trying to acquire it.
1797          */
1798         mutex_lock(&lock);
1799         if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
1800                 mutex_unlock(&lock);
1801                 goto out;
1802         }
1803
1804         if (!status && !id_priv->cma_dev)
1805                 status = cma_acquire_dev(id_priv);
1806         mutex_unlock(&lock);
1807
1808         if (status) {
1809                 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
1810                         goto out;
1811                 event.event = RDMA_CM_EVENT_ADDR_ERROR;
1812                 event.status = status;
1813         } else {
1814                 memcpy(&id_priv->id.route.addr.src_addr, src_addr,
1815                        ip_addr_size(src_addr));
1816                 event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
1817         }
1818
1819         if (id_priv->id.event_handler(&id_priv->id, &event)) {
1820                 cma_exch(id_priv, CMA_DESTROYING);
1821                 mutex_unlock(&id_priv->handler_mutex);
1822                 cma_deref_id(id_priv);
1823                 rdma_destroy_id(&id_priv->id);
1824                 return;
1825         }
1826 out:
1827         mutex_unlock(&id_priv->handler_mutex);
1828         cma_deref_id(id_priv);
1829 }
1830
1831 static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1832 {
1833         struct cma_work *work;
1834         struct sockaddr_in *src_in, *dst_in;
1835         union ib_gid gid;
1836         int ret;
1837
1838         work = kzalloc(sizeof *work, GFP_KERNEL);
1839         if (!work)
1840                 return -ENOMEM;
1841
1842         if (!id_priv->cma_dev) {
1843                 ret = cma_bind_loopback(id_priv);
1844                 if (ret)
1845                         goto err;
1846         }
1847
1848         ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1849         ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
1850
1851         if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
1852                 src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
1853                 dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
1854                 src_in->sin_family = dst_in->sin_family;
1855                 src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
1856         }
1857
1858         work->id = id_priv;
1859         INIT_WORK(&work->work, cma_work_handler);
1860         work->old_state = CMA_ADDR_QUERY;
1861         work->new_state = CMA_ADDR_RESOLVED;
1862         work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
1863         queue_work(cma_wq, &work->work);
1864         return 0;
1865 err:
1866         kfree(work);
1867         return ret;
1868 }
1869
1870 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1871                          struct sockaddr *dst_addr)
1872 {
1873         if (src_addr && src_addr->sa_family)
1874                 return rdma_bind_addr(id, src_addr);
1875         else
1876                 return cma_bind_any(id, dst_addr->sa_family);
1877 }
1878
1879 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1880                       struct sockaddr *dst_addr, int timeout_ms)
1881 {
1882         struct rdma_id_private *id_priv;
1883         int ret;
1884
1885         id_priv = container_of(id, struct rdma_id_private, id);
1886         if (id_priv->state == CMA_IDLE) {
1887                 ret = cma_bind_addr(id, src_addr, dst_addr);
1888                 if (ret)
1889                         return ret;
1890         }
1891
1892         if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
1893                 return -EINVAL;
1894
1895         atomic_inc(&id_priv->refcount);
1896         memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
1897         if (cma_any_addr(dst_addr))
1898                 ret = cma_resolve_loopback(id_priv);
1899         else
1900                 ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
1901                                       dst_addr, &id->route.addr.dev_addr,
1902                                       timeout_ms, addr_handler, id_priv);
1903         if (ret)
1904                 goto err;
1905
1906         return 0;
1907 err:
1908         cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
1909         cma_deref_id(id_priv);
1910         return ret;
1911 }
1912 EXPORT_SYMBOL(rdma_resolve_addr);
1913
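/*
 * Active-side usage sketch (hypothetical ULP code; my_handler and the
 * sockaddrs are illustrative).  Each call is asynchronous and completes in
 * the event handler before the next step is issued:
 *
 *	rdma_resolve_addr(id, NULL, (struct sockaddr *) &dst, 2000);
 *		-> handler sees RDMA_CM_EVENT_ADDR_RESOLVED
 *	rdma_resolve_route(id, 2000);
 *		-> handler sees RDMA_CM_EVENT_ROUTE_RESOLVED
 *	rdma_connect(id, &conn_param);
 *		-> handler sees RDMA_CM_EVENT_ESTABLISHED or a rejection
 *
 * A NULL src_addr lets cma_bind_addr() bind to the wildcard address of the
 * destination's family before the query starts.
 */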
1914 static void cma_bind_port(struct rdma_bind_list *bind_list,
1915                           struct rdma_id_private *id_priv)
1916 {
1917         struct sockaddr_in *sin;
1918
1919         sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1920         sin->sin_port = htons(bind_list->port);
1921         id_priv->bind_list = bind_list;
1922         hlist_add_head(&id_priv->node, &bind_list->owners);
1923 }
1924
1925 static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
1926                           unsigned short snum)
1927 {
1928         struct rdma_bind_list *bind_list;
1929         int port, ret;
1930
1931         bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
1932         if (!bind_list)
1933                 return -ENOMEM;
1934
1935         do {
1936                 ret = idr_get_new_above(ps, bind_list, snum, &port);
1937         } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
1938
1939         if (ret)
1940                 goto err1;
1941
1942         if (port != snum) {
1943                 ret = -EADDRNOTAVAIL;
1944                 goto err2;
1945         }
1946
1947         bind_list->ps = ps;
1948         bind_list->port = (unsigned short) port;
1949         cma_bind_port(bind_list, id_priv);
1950         return 0;
1951 err2:
1952         idr_remove(ps, port);
1953 err1:
1954         kfree(bind_list);
1955         return ret;
1956 }
1957
1958 static int cma_alloc_any_port(struct idr *ps, struct rdma_id_private *id_priv)
1959 {
1960         struct rdma_bind_list *bind_list;
1961         int port, ret, low, high;
1962
1963         bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
1964         if (!bind_list)
1965                 return -ENOMEM;
1966
1967 retry:
1968         /* FIXME: add proper port randomization, as inet_csk_get_port() does */
1969         do {
1970                 ret = idr_get_new_above(ps, bind_list, next_port, &port);
1971         } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
1972
1973         if (ret)
1974                 goto err1;
1975
1976         inet_get_local_port_range(&low, &high);
1977         if (port > high) {
1978                 if (next_port != low) {
1979                         idr_remove(ps, port);
1980                         next_port = low;
1981                         goto retry;
1982                 }
1983                 ret = -EADDRNOTAVAIL;
1984                 goto err2;
1985         }
1986
1987         if (port == high)
1988                 next_port = low;
1989         else
1990                 next_port = port + 1;
1991
1992         bind_list->ps = ps;
1993         bind_list->port = (unsigned short) port;
1994         cma_bind_port(bind_list, id_priv);
1995         return 0;
1996 err2:
1997         idr_remove(ps, port);
1998 err1:
1999         kfree(bind_list);
2000         return ret;
2001 }
2002
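/*
 * Worked example of the rotor in cma_alloc_any_port() above, assuming the
 * common local port range 32768..61000: a grant below 61000 just advances
 * next_port; a grant of exactly 61000 wraps next_port back to 32768; a grant
 * above 61000 is released and the allocation retried once from 32768 before
 * failing with -EADDRNOTAVAIL.
 */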
2003 static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
2004 {
2005         struct rdma_id_private *cur_id;
2006         struct sockaddr_in *sin, *cur_sin;
2007         struct rdma_bind_list *bind_list;
2008         struct hlist_node *node;
2009         unsigned short snum;
2010
2011         sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
2012         snum = ntohs(sin->sin_port);
2013         if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
2014                 return -EACCES;
2015
2016         bind_list = idr_find(ps, snum);
2017         if (!bind_list)
2018                 return cma_alloc_port(ps, id_priv, snum);
2019
2020         /*
2021          * We don't support binding to any address if anyone is bound to
2022          * a specific address on the same port.
2023          */
2024         if (cma_any_addr(&id_priv->id.route.addr.src_addr))
2025                 return -EADDRNOTAVAIL;
2026
2027         hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
2028                 if (cma_any_addr(&cur_id->id.route.addr.src_addr))
2029                         return -EADDRNOTAVAIL;
2030
2031                 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
2032                 if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
2033                         return -EADDRINUSE;
2034         }
2035
2036         cma_bind_port(bind_list, id_priv);
2037         return 0;
2038 }
2039
2040 static int cma_get_port(struct rdma_id_private *id_priv)
2041 {
2042         struct idr *ps;
2043         int ret;
2044
2045         switch (id_priv->id.ps) {
2046         case RDMA_PS_SDP:
2047                 ps = &sdp_ps;
2048                 break;
2049         case RDMA_PS_TCP:
2050                 ps = &tcp_ps;
2051                 break;
2052         case RDMA_PS_UDP:
2053                 ps = &udp_ps;
2054                 break;
2055         case RDMA_PS_IPOIB:
2056                 ps = &ipoib_ps;
2057                 break;
2058         default:
2059                 return -EPROTONOSUPPORT;
2060         }
2061
2062         mutex_lock(&lock);
2063         if (cma_any_port(&id_priv->id.route.addr.src_addr))
2064                 ret = cma_alloc_any_port(ps, id_priv);
2065         else
2066                 ret = cma_use_port(ps, id_priv);
2067         mutex_unlock(&lock);
2068
2069         return ret;
2070 }
2071
2072 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
2073 {
2074         struct rdma_id_private *id_priv;
2075         int ret;
2076
2077         if (addr->sa_family != AF_INET)
2078                 return -EAFNOSUPPORT;
2079
2080         id_priv = container_of(id, struct rdma_id_private, id);
2081         if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
2082                 return -EINVAL;
2083
2084         if (!cma_any_addr(addr)) {
2085                 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
2086                 if (ret)
2087                         goto err1;
2088
2089                 mutex_lock(&lock);
2090                 ret = cma_acquire_dev(id_priv);
2091                 mutex_unlock(&lock);
2092                 if (ret)
2093                         goto err1;
2094         }
2095
2096         memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
2097         ret = cma_get_port(id_priv);
2098         if (ret)
2099                 goto err2;
2100
2101         return 0;
2102 err2:
2103         if (!cma_any_addr(addr)) {
2104                 mutex_lock(&lock);
2105                 cma_detach_from_dev(id_priv);
2106                 mutex_unlock(&lock);
2107         }
2108 err1:
2109         cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
2110         return ret;
2111 }
2112 EXPORT_SYMBOL(rdma_bind_addr);
2113
2114 static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
2115                           struct rdma_route *route)
2116 {
2117         struct sockaddr_in *src4, *dst4;
2118         struct cma_hdr *cma_hdr;
2119         struct sdp_hh *sdp_hdr;
2120
2121         src4 = (struct sockaddr_in *) &route->addr.src_addr;
2122         dst4 = (struct sockaddr_in *) &route->addr.dst_addr;
2123
2124         switch (ps) {
2125         case RDMA_PS_SDP:
2126                 sdp_hdr = hdr;
2127                 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
2128                         return -EINVAL;
2129                 sdp_set_ip_ver(sdp_hdr, 4);
2130                 sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2131                 sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2132                 sdp_hdr->port = src4->sin_port;
2133                 break;
2134         default:
2135                 cma_hdr = hdr;
2136                 cma_hdr->cma_version = CMA_VERSION;
2137                 cma_set_ip_ver(cma_hdr, 4);
2138                 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
2139                 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
2140                 cma_hdr->port = src4->sin_port;
2141                 break;
2142         }
2143         return 0;
2144 }
2145
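/*
 * The header built by cma_format_hdr() above occupies the start of the IB CM
 * private data; the user's payload follows it, with the split computed by
 * cma_user_data_offset() (or sizeof(struct cma_hdr) for SIDR requests).
 * Only IPv4 addressing is emitted, matching the AF_INET restriction in
 * rdma_bind_addr().
 */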
2146 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
2147                                 struct ib_cm_event *ib_event)
2148 {
2149         struct rdma_id_private *id_priv = cm_id->context;
2150         struct rdma_cm_event event;
2151         struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
2152         int ret = 0;
2153
2154         if (cma_disable_callback(id_priv, CMA_CONNECT))
2155                 return 0;
2156
2157         memset(&event, 0, sizeof event);
2158         switch (ib_event->event) {
2159         case IB_CM_SIDR_REQ_ERROR:
2160                 event.event = RDMA_CM_EVENT_UNREACHABLE;
2161                 event.status = -ETIMEDOUT;
2162                 break;
2163         case IB_CM_SIDR_REP_RECEIVED:
2164                 event.param.ud.private_data = ib_event->private_data;
2165                 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
2166                 if (rep->status != IB_SIDR_SUCCESS) {
2167                         event.event = RDMA_CM_EVENT_UNREACHABLE;
2168                         event.status = rep->status;
2169                         break;
2170                 }
2171                 if (id_priv->qkey != rep->qkey) {
2172                         event.event = RDMA_CM_EVENT_UNREACHABLE;
2173                         event.status = -EINVAL;
2174                         break;
2175                 }
2176                 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num,
2177                                      id_priv->id.route.path_rec,
2178                                      &event.param.ud.ah_attr);
2179                 event.param.ud.qp_num = rep->qpn;
2180                 event.param.ud.qkey = rep->qkey;
2181                 event.event = RDMA_CM_EVENT_ESTABLISHED;
2182                 event.status = 0;
2183                 break;
2184         default:
2185                 printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
2186                        ib_event->event);
2187                 goto out;
2188         }
2189
2190         ret = id_priv->id.event_handler(&id_priv->id, &event);
2191         if (ret) {
2192                 /* Destroy the CM ID by returning a non-zero value. */
2193                 id_priv->cm_id.ib = NULL;
2194                 cma_exch(id_priv, CMA_DESTROYING);
2195                 mutex_unlock(&id_priv->handler_mutex);
2196                 rdma_destroy_id(&id_priv->id);
2197                 return ret;
2198         }
2199 out:
2200         mutex_unlock(&id_priv->handler_mutex);
2201         return ret;
2202 }
2203
2204 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
2205                               struct rdma_conn_param *conn_param)
2206 {
2207         struct ib_cm_sidr_req_param req;
2208         struct rdma_route *route;
2209         int ret;
2210
2211         req.private_data_len = sizeof(struct cma_hdr) +
2212                                conn_param->private_data_len;
2213         req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2214         if (!req.private_data)
2215                 return -ENOMEM;
2216
2217         if (conn_param->private_data && conn_param->private_data_len)
2218                 memcpy((void *) req.private_data + sizeof(struct cma_hdr),
2219                        conn_param->private_data, conn_param->private_data_len);
2220
2221         route = &id_priv->id.route;
2222         ret = cma_format_hdr((void *) req.private_data, id_priv->id.ps, route);
2223         if (ret)
2224                 goto out;
2225
2226         id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
2227                                             cma_sidr_rep_handler, id_priv);
2228         if (IS_ERR(id_priv->cm_id.ib)) {
2229                 ret = PTR_ERR(id_priv->cm_id.ib);
2230                 goto out;
2231         }
2232
2233         req.path = route->path_rec;
2234         req.service_id = cma_get_service_id(id_priv->id.ps,
2235                                             &route->addr.dst_addr);
2236         req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
2237         req.max_cm_retries = CMA_MAX_CM_RETRIES;
2238
2239         ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
2240         if (ret) {
2241                 ib_destroy_cm_id(id_priv->cm_id.ib);
2242                 id_priv->cm_id.ib = NULL;
2243         }
2244 out:
2245         kfree(req.private_data);
2246         return ret;
2247 }
2248
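/*
 * UD port spaces resolve the remote QP with a single SIDR REQ/REP exchange
 * rather than the full REQ/REP/RTU handshake used below:
 * cma_sidr_rep_handler() validates the returned qkey and reports
 * RDMA_CM_EVENT_ESTABLISHED together with the address-handle attributes,
 * QPN and qkey needed to post sends to the peer.
 */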
2249 static int cma_connect_ib(struct rdma_id_private *id_priv,
2250                           struct rdma_conn_param *conn_param)
2251 {
2252         struct ib_cm_req_param req;
2253         struct rdma_route *route;
2254         void *private_data;
2255         int offset, ret;
2256
2257         memset(&req, 0, sizeof req);
2258         offset = cma_user_data_offset(id_priv->id.ps);
2259         req.private_data_len = offset + conn_param->private_data_len;
2260         private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
2261         if (!private_data)
2262                 return -ENOMEM;
2263
2264         if (conn_param->private_data && conn_param->private_data_len)
2265                 memcpy(private_data + offset, conn_param->private_data,
2266                        conn_param->private_data_len);
2267
2268         id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
2269                                             id_priv);
2270         if (IS_ERR(id_priv->cm_id.ib)) {
2271                 ret = PTR_ERR(id_priv->cm_id.ib);
2272                 goto out;
2273         }
2274
2275         route = &id_priv->id.route;
2276         ret = cma_format_hdr(private_data, id_priv->id.ps, route);
2277         if (ret)
2278                 goto out;
2279         req.private_data = private_data;
2280
2281         req.primary_path = &route->path_rec[0];
2282         if (route->num_paths == 2)
2283                 req.alternate_path = &route->path_rec[1];
2284
2285         req.service_id = cma_get_service_id(id_priv->id.ps,
2286                                             &route->addr.dst_addr);
2287         req.qp_num = id_priv->qp_num;
2288         req.qp_type = IB_QPT_RC;
2289         req.starting_psn = id_priv->seq_num;
2290         req.responder_resources = conn_param->responder_resources;
2291         req.initiator_depth = conn_param->initiator_depth;
2292         req.flow_control = conn_param->flow_control;
2293         req.retry_count = conn_param->retry_count;
2294         req.rnr_retry_count = conn_param->rnr_retry_count;
2295         req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
2296         req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
2297         req.max_cm_retries = CMA_MAX_CM_RETRIES;
2298         req.srq = id_priv->srq ? 1 : 0;
2299
2300         ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
2301 out:
2302         if (ret && !IS_ERR(id_priv->cm_id.ib)) {
2303                 ib_destroy_cm_id(id_priv->cm_id.ib);
2304                 id_priv->cm_id.ib = NULL;
2305         }
2306
2307         kfree(private_data);
2308         return ret;
2309 }
2310
2311 static int cma_connect_iw(struct rdma_id_private *id_priv,
2312                           struct rdma_conn_param *conn_param)
2313 {
2314         struct iw_cm_id *cm_id;
2315         struct sockaddr_in *sin;
2316         int ret;
2317         struct iw_cm_conn_param iw_param;
2318
2319         cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
2320         if (IS_ERR(cm_id)) {
2321                 ret = PTR_ERR(cm_id);
2322                 goto out;
2323         }
2324
2325         id_priv->cm_id.iw = cm_id;
2326
2327         sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
2328         cm_id->local_addr = *sin;
2329
2330         sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
2331         cm_id->remote_addr = *sin;
2332
2333         ret = cma_modify_qp_rtr(id_priv, conn_param);
2334         if (ret)
2335                 goto out;
2336
2337         iw_param.ord = conn_param->initiator_depth;
2338         iw_param.ird = conn_param->responder_resources;
2339         iw_param.private_data = conn_param->private_data;
2340         iw_param.private_data_len = conn_param->private_data_len;
2341         if (id_priv->id.qp)
2342                 iw_param.qpn = id_priv->qp_num;
2343         else
2344                 iw_param.qpn = conn_param->qp_num;
2345         ret = iw_cm_connect(cm_id, &iw_param);
2346 out:
2347         if (ret && !IS_ERR(cm_id)) {
2348                 iw_destroy_cm_id(cm_id);
2349                 id_priv->cm_id.iw = NULL;
2350         }
2351         return ret;
2352 }
2353
2354 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2355 {
2356         struct rdma_id_private *id_priv;
2357         int ret;
2358
2359         id_priv = container_of(id, struct rdma_id_private, id);
2360         if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
2361                 return -EINVAL;
2362
2363         if (!id->qp) {
2364                 id_priv->qp_num = conn_param->qp_num;
2365                 id_priv->srq = conn_param->srq;
2366         }
2367
2368         switch (rdma_node_get_transport(id->device->node_type)) {
2369         case RDMA_TRANSPORT_IB:
2370                 if (cma_is_ud_ps(id->ps))
2371                         ret = cma_resolve_ib_udp(id_priv, conn_param);
2372                 else
2373                         ret = cma_connect_ib(id_priv, conn_param);
2374                 break;
2375         case RDMA_TRANSPORT_IWARP:
2376                 ret = cma_connect_iw(id_priv, conn_param);
2377                 break;
2378         default:
2379                 ret = -ENOSYS;
2380                 break;
2381         }
2382         if (ret)
2383                 goto err;
2384
2385         return 0;
2386 err:
2387         cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
2388         return ret;
2389 }
2390 EXPORT_SYMBOL(rdma_connect);
2391
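/*
 * Sketch of filling struct rdma_conn_param for a connected id; the values
 * are illustrative only, not tuning advice:
 *
 *	struct rdma_conn_param conn_param;
 *
 *	memset(&conn_param, 0, sizeof conn_param);
 *	conn_param.responder_resources = 1;
 *	conn_param.initiator_depth = 1;
 *	conn_param.retry_count = 7;
 *	conn_param.rnr_retry_count = 7;
 *	ret = rdma_connect(id, &conn_param);
 *
 * An id created without a QP must also supply qp_num (and srq), which
 * rdma_connect() copies into the private id before sending the REQ.
 */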
2392 static int cma_accept_ib(struct rdma_id_private *id_priv,
2393                          struct rdma_conn_param *conn_param)
2394 {
2395         struct ib_cm_rep_param rep;
2396         int ret;
2397
2398         ret = cma_modify_qp_rtr(id_priv, conn_param);
2399         if (ret)
2400                 goto out;
2401
2402         ret = cma_modify_qp_rts(id_priv, conn_param);
2403         if (ret)
2404                 goto out;
2405
2406         memset(&rep, 0, sizeof rep);
2407         rep.qp_num = id_priv->qp_num;
2408         rep.starting_psn = id_priv->seq_num;
2409         rep.private_data = conn_param->private_data;
2410         rep.private_data_len = conn_param->private_data_len;
2411         rep.responder_resources = conn_param->responder_resources;
2412         rep.initiator_depth = conn_param->initiator_depth;
2413         rep.failover_accepted = 0;
2414         rep.flow_control = conn_param->flow_control;
2415         rep.rnr_retry_count = conn_param->rnr_retry_count;
2416         rep.srq = id_priv->srq ? 1 : 0;
2417
2418         ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
2419 out:
2420         return ret;
2421 }
2422
2423 static int cma_accept_iw(struct rdma_id_private *id_priv,
2424                          struct rdma_conn_param *conn_param)
2425 {
2426         struct iw_cm_conn_param iw_param;
2427         int ret;
2428
2429         ret = cma_modify_qp_rtr(id_priv, conn_param);
2430         if (ret)
2431                 return ret;
2432
2433         iw_param.ord = conn_param->initiator_depth;
2434         iw_param.ird = conn_param->responder_resources;
2435         iw_param.private_data = conn_param->private_data;
2436         iw_param.private_data_len = conn_param->private_data_len;
2437         if (id_priv->id.qp)
2438                 iw_param.qpn = id_priv->qp_num;
2439         else
2440                 iw_param.qpn = conn_param->qp_num;
2441
2442         return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
2443 }
2444
2445 static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
2446                              enum ib_cm_sidr_status status,
2447                              const void *private_data, int private_data_len)
2448 {
2449         struct ib_cm_sidr_rep_param rep;
2450
2451         memset(&rep, 0, sizeof rep);
2452         rep.status = status;
2453         if (status == IB_SIDR_SUCCESS) {
2454                 rep.qp_num = id_priv->qp_num;
2455                 rep.qkey = id_priv->qkey;
2456         }
2457         rep.private_data = private_data;
2458         rep.private_data_len = private_data_len;
2459
2460         return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
2461 }
2462
2463 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2464 {
2465         struct rdma_id_private *id_priv;
2466         int ret;
2467
2468         id_priv = container_of(id, struct rdma_id_private, id);
2469         if (!cma_comp(id_priv, CMA_CONNECT))
2470                 return -EINVAL;
2471
2472         if (!id->qp && conn_param) {
2473                 id_priv->qp_num = conn_param->qp_num;
2474                 id_priv->srq = conn_param->srq;
2475         }
2476
2477         switch (rdma_node_get_transport(id->device->node_type)) {
2478         case RDMA_TRANSPORT_IB:
2479                 if (cma_is_ud_ps(id->ps))
2480                         ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
2481                                 conn_param ? conn_param->private_data : NULL,
2482                                 conn_param ? conn_param->private_data_len : 0);
2483                 else if (conn_param)
2484                         ret = cma_accept_ib(id_priv, conn_param);
2485                 else
2486                         ret = cma_rep_recv(id_priv);
2487                 break;
2488         case RDMA_TRANSPORT_IWARP:
2489                 ret = cma_accept_iw(id_priv, conn_param);
2490                 break;
2491         default:
2492                 ret = -ENOSYS;
2493                 break;
2494         }
2495
2496         if (ret)
2497                 goto reject;
2498
2499         return 0;
2500 reject:
2501         cma_modify_qp_err(id_priv);
2502         rdma_reject(id, NULL, 0);
2503         return ret;
2504 }
2505 EXPORT_SYMBOL(rdma_accept);
2506
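/*
 * rdma_accept() is normally called from the listener's event handler on the
 * new id delivered with RDMA_CM_EVENT_CONNECT_REQUEST, which arrives in the
 * CMA_CONNECT state checked above.  A NULL conn_param on an IB RC id accepts
 * with the parameters taken from the REQ via cma_rep_recv(); on any failure
 * the connection is rejected and the QP transitioned to the error state.
 */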
2507 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
2508 {
2509         struct rdma_id_private *id_priv;
2510         int ret;
2511
2512         id_priv = container_of(id, struct rdma_id_private, id);
2513         if (!cma_has_cm_dev(id_priv))
2514                 return -EINVAL;
2515
2516         switch (id->device->node_type) {
2517         case RDMA_NODE_IB_CA:
2518                 ret = ib_cm_notify(id_priv->cm_id.ib, event);
2519                 break;
2520         default:
2521                 ret = 0;
2522                 break;
2523         }
2524         return ret;
2525 }
2526 EXPORT_SYMBOL(rdma_notify);
2527
2528 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2529                 u8 private_data_len)
2530 {
2531         struct rdma_id_private *id_priv;
2532         int ret;
2533
2534         id_priv = container_of(id, struct rdma_id_private, id);
2535         if (!cma_has_cm_dev(id_priv))
2536                 return -EINVAL;
2537
2538         switch (rdma_node_get_transport(id->device->node_type)) {
2539         case RDMA_TRANSPORT_IB:
2540                 if (cma_is_ud_ps(id->ps))
2541                         ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
2542                                                 private_data, private_data_len);
2543                 else
2544                         ret = ib_send_cm_rej(id_priv->cm_id.ib,
2545                                              IB_CM_REJ_CONSUMER_DEFINED, NULL,
2546                                              0, private_data, private_data_len);
2547                 break;
2548         case RDMA_TRANSPORT_IWARP:
2549                 ret = iw_cm_reject(id_priv->cm_id.iw,
2550                                    private_data, private_data_len);
2551                 break;
2552         default:
2553                 ret = -ENOSYS;
2554                 break;
2555         }
2556         return ret;
2557 }
2558 EXPORT_SYMBOL(rdma_reject);
2559
2560 int rdma_disconnect(struct rdma_cm_id *id)
2561 {
2562         struct rdma_id_private *id_priv;
2563         int ret;
2564
2565         id_priv = container_of(id, struct rdma_id_private, id);
2566         if (!cma_has_cm_dev(id_priv))
2567                 return -EINVAL;
2568
2569         switch (rdma_node_get_transport(id->device->node_type)) {
2570         case RDMA_TRANSPORT_IB:
2571                 ret = cma_modify_qp_err(id_priv);
2572                 if (ret)
2573                         goto out;
2574                 /* Initiate or respond to a disconnect. */
2575                 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
2576                         ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
2577                 break;
2578         case RDMA_TRANSPORT_IWARP:
2579                 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
2580                 break;
2581         default:
2582                 ret = -EINVAL;
2583                 break;
2584         }
2585 out:
2586         return ret;
2587 }
2588 EXPORT_SYMBOL(rdma_disconnect);
2589
2590 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
2591 {
2592         struct rdma_id_private *id_priv;
2593         struct cma_multicast *mc = multicast->context;
2594         struct rdma_cm_event event;
2595         int ret;
2596
2597         id_priv = mc->id_priv;
2598         if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
2599             cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
2600                 return 0;
2601
2602         mutex_lock(&id_priv->qp_mutex);
2603         if (!status && id_priv->id.qp)
2604                 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
2605                                          multicast->rec.mlid);
2606         mutex_unlock(&id_priv->qp_mutex);
2607
2608         memset(&event, 0, sizeof event);
2609         event.status = status;
2610         event.param.ud.private_data = mc->context;
2611         if (!status) {
2612                 event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
2613                 ib_init_ah_from_mcmember(id_priv->id.device,
2614                                          id_priv->id.port_num, &multicast->rec,
2615                                          &event.param.ud.ah_attr);
2616                 event.param.ud.qp_num = 0xFFFFFF;
2617                 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
2618         } else
2619                 event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
2620
2621         ret = id_priv->id.event_handler(&id_priv->id, &event);
2622         if (ret) {
2623                 cma_exch(id_priv, CMA_DESTROYING);
2624                 mutex_unlock(&id_priv->handler_mutex);
2625                 rdma_destroy_id(&id_priv->id);
2626                 return 0;
2627         }
2628
2629         mutex_unlock(&id_priv->handler_mutex);
2630         return 0;
2631 }
2632
2633 static void cma_set_mgid(struct rdma_id_private *id_priv,
2634                          struct sockaddr *addr, union ib_gid *mgid)
2635 {
2636         unsigned char mc_map[MAX_ADDR_LEN];
2637         struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2638         struct sockaddr_in *sin = (struct sockaddr_in *) addr;
2639         struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
2640
2641         if (cma_any_addr(addr)) {
2642                 memset(mgid, 0, sizeof *mgid);
2643         } else if ((addr->sa_family == AF_INET6) &&
2644                    ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFF10A01B) ==
2645                                                                  0xFF10A01B)) {
2646                 /* IPv6 address is an SA assigned MGID. */
2647                 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
2648         } else {
2649                 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
2650                 if (id_priv->id.ps == RDMA_PS_UDP)
2651                         mc_map[7] = 0x01;       /* Use RDMA CM signature */
2652                 *mgid = *(union ib_gid *) (mc_map + 4);
2653         }
2654 }
2655
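/*
 * For IPv4 groups (and IPv6 groups without an SA-assigned MGID) the MGID
 * above comes from the IP-over-IB multicast mapping (RFC 4391):
 * ip_ib_mc_map() yields the 20-byte IPoIB hardware address, a 4-byte QPN
 * followed by the 16-byte GID, hence the copy from mc_map + 4.  UDP
 * port-space ids overwrite one signature byte so RDMA CM joins do not
 * collide with plain IPoIB multicast groups.
 */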
2656 static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
2657                                  struct cma_multicast *mc)
2658 {
2659         struct ib_sa_mcmember_rec rec;
2660         struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2661         ib_sa_comp_mask comp_mask;
2662         int ret;
2663
2664         ib_addr_get_mgid(dev_addr, &rec.mgid);
2665         ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
2666                                      &rec.mgid, &rec);
2667         if (ret)
2668                 return ret;
2669
2670         cma_set_mgid(id_priv, &mc->addr, &rec.mgid);
2671         if (id_priv->id.ps == RDMA_PS_UDP)
2672                 rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
2673         ib_addr_get_sgid(dev_addr, &rec.port_gid);
2674         rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
2675         rec.join_state = 1;
2676
2677         comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
2678                     IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
2679                     IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
2680                     IB_SA_MCMEMBER_REC_FLOW_LABEL |
2681                     IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
2682
2683         mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
2684                                                 id_priv->id.port_num, &rec,
2685                                                 comp_mask, GFP_KERNEL,
2686                                                 cma_ib_mc_handler, mc);
2687         if (IS_ERR(mc->multicast.ib))
2688                 return PTR_ERR(mc->multicast.ib);
2689
2690         return 0;
2691 }
2692
2693 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
2694                         void *context)
2695 {
2696         struct rdma_id_private *id_priv;
2697         struct cma_multicast *mc;
2698         int ret;
2699
2700         id_priv = container_of(id, struct rdma_id_private, id);
2701         if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
2702             !cma_comp(id_priv, CMA_ADDR_RESOLVED))
2703                 return -EINVAL;
2704
2705         mc = kmalloc(sizeof *mc, GFP_KERNEL);
2706         if (!mc)
2707                 return -ENOMEM;
2708
2709         memcpy(&mc->addr, addr, ip_addr_size(addr));
2710         mc->context = context;
2711         mc->id_priv = id_priv;
2712
2713         spin_lock(&id_priv->lock);
2714         list_add(&mc->list, &id_priv->mc_list);
2715         spin_unlock(&id_priv->lock);
2716
2717         switch (rdma_node_get_transport(id->device->node_type)) {
2718         case RDMA_TRANSPORT_IB:
2719                 ret = cma_join_ib_multicast(id_priv, mc);
2720                 break;
2721         default:
2722                 ret = -ENOSYS;
2723                 break;
2724         }
2725
2726         if (ret) {
2727                 spin_lock_irq(&id_priv->lock);
2728                 list_del(&mc->list);
2729                 spin_unlock_irq(&id_priv->lock);
2730                 kfree(mc);
2731         }
2732         return ret;
2733 }
2734 EXPORT_SYMBOL(rdma_join_multicast);
2735
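/*
 * Multicast usage sketch: on a UD id with a bound or resolved address,
 * rdma_join_multicast(id, (struct sockaddr *) &grp_addr, ctx) starts an
 * asynchronous SA join; success is reported to the handler as
 * RDMA_CM_EVENT_MULTICAST_JOIN carrying the ah_attr, the qkey and the
 * multicast QPN 0xFFFFFF used to address the group, and any QP bound to the
 * id is attached automatically in cma_ib_mc_handler().
 */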
2736 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
2737 {
2738         struct rdma_id_private *id_priv;
2739         struct cma_multicast *mc;
2740
2741         id_priv = container_of(id, struct rdma_id_private, id);
2742         spin_lock_irq(&id_priv->lock);
2743         list_for_each_entry(mc, &id_priv->mc_list, list) {
2744                 if (!memcmp(&mc->addr, addr, ip_addr_size(addr))) {
2745                         list_del(&mc->list);
2746                         spin_unlock_irq(&id_priv->lock);
2747
2748                         if (id->qp)
2749                                 ib_detach_mcast(id->qp,
2750                                                 &mc->multicast.ib->rec.mgid,
2751                                                 mc->multicast.ib->rec.mlid);
2752                         ib_sa_free_multicast(mc->multicast.ib);
2753                         kfree(mc);
2754                         return;
2755                 }
2756         }
2757         spin_unlock_irq(&id_priv->lock);
2758 }
2759 EXPORT_SYMBOL(rdma_leave_multicast);
2760
2761 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
2762 {
2763         struct rdma_dev_addr *dev_addr;
2764         struct cma_ndev_work *work;
2765
2766         dev_addr = &id_priv->id.route.addr.dev_addr;
2767
2768         if ((dev_addr->src_dev == ndev) &&
2769             memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
2770                 printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n",
2771                        ndev->name, &id_priv->id);
2772                 work = kzalloc(sizeof *work, GFP_KERNEL);
2773                 if (!work)
2774                         return -ENOMEM;
2775
2776                 INIT_WORK(&work->work, cma_ndev_work_handler);
2777                 work->id = id_priv;
2778                 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
2779                 atomic_inc(&id_priv->refcount);
2780                 queue_work(cma_wq, &work->work);
2781         }
2782
2783         return 0;
2784 }
2785
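/*
 * The notifier below acts only on NETDEV_BONDING_FAILOVER events from a
 * bonding master: every cma id whose cached source device address no longer
 * matches the netdev gets an RDMA_CM_EVENT_ADDR_CHANGE work item queued,
 * prompting the ULP to re-resolve and reconnect over the newly active slave.
 */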
2786 static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
2787                                void *ctx)
2788 {
2789         struct net_device *ndev = (struct net_device *)ctx;
2790         struct cma_device *cma_dev;
2791         struct rdma_id_private *id_priv;
2792         int ret = NOTIFY_DONE;
2793
2794         if (dev_net(ndev) != &init_net)
2795                 return NOTIFY_DONE;
2796
2797         if (event != NETDEV_BONDING_FAILOVER)
2798                 return NOTIFY_DONE;
2799
2800         if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING))
2801                 return NOTIFY_DONE;
2802
2803         mutex_lock(&lock);
2804         list_for_each_entry(cma_dev, &dev_list, list)
2805                 list_for_each_entry(id_priv, &cma_dev->id_list, list) {
2806                         ret = cma_netdev_change(ndev, id_priv);
2807                         if (ret)
2808                                 goto out;
2809                 }
2810
2811 out:
2812         mutex_unlock(&lock);
2813         return ret;
2814 }
2815
2816 static struct notifier_block cma_nb = {
2817         .notifier_call = cma_netdev_callback
2818 };
2819
2820 static void cma_add_one(struct ib_device *device)
2821 {
2822         struct cma_device *cma_dev;
2823         struct rdma_id_private *id_priv;
2824
2825         cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
2826         if (!cma_dev)
2827                 return;
2828
2829         cma_dev->device = device;
2830
2831         init_completion(&cma_dev->comp);
2832         atomic_set(&cma_dev->refcount, 1);
2833         INIT_LIST_HEAD(&cma_dev->id_list);
2834         ib_set_client_data(device, &cma_client, cma_dev);
2835
2836         mutex_lock(&lock);
2837         list_add_tail(&cma_dev->list, &dev_list);
2838         list_for_each_entry(id_priv, &listen_any_list, list)
2839                 cma_listen_on_dev(id_priv, cma_dev);
2840         mutex_unlock(&lock);
2841 }
2842
2843 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
2844 {
2845         struct rdma_cm_event event;
2846         enum cma_state state;
2847         int ret = 0;
2848
2849         /* Record that we want to remove the device */
2850         state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
2851         if (state == CMA_DESTROYING)
2852                 return 0;
2853
2854         cma_cancel_operation(id_priv, state);
2855         mutex_lock(&id_priv->handler_mutex);
2856
2857         /* Check for destruction from another callback. */
2858         if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
2859                 goto out;
2860
2861         memset(&event, 0, sizeof event);
2862         event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
2863         ret = id_priv->id.event_handler(&id_priv->id, &event);
2864 out:
2865         mutex_unlock(&id_priv->handler_mutex);
2866         return ret;
2867 }
2868
2869 static void cma_process_remove(struct cma_device *cma_dev)
2870 {
2871         struct rdma_id_private *id_priv;
2872         int ret;
2873
2874         mutex_lock(&lock);
2875         while (!list_empty(&cma_dev->id_list)) {
2876                 id_priv = list_entry(cma_dev->id_list.next,
2877                                      struct rdma_id_private, list);
2878
2879                 list_del(&id_priv->listen_list);
2880                 list_del_init(&id_priv->list);
2881                 atomic_inc(&id_priv->refcount);
2882                 mutex_unlock(&lock);
2883
2884                 ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
2885                 cma_deref_id(id_priv);
2886                 if (ret)
2887                         rdma_destroy_id(&id_priv->id);
2888
2889                 mutex_lock(&lock);
2890         }
2891         mutex_unlock(&lock);
2892
2893         cma_deref_dev(cma_dev);
2894         wait_for_completion(&cma_dev->comp);
2895 }
2896
2897 static void cma_remove_one(struct ib_device *device)
2898 {
2899         struct cma_device *cma_dev;
2900
2901         cma_dev = ib_get_client_data(device, &cma_client);
2902         if (!cma_dev)
2903                 return;
2904
2905         mutex_lock(&lock);
2906         list_del(&cma_dev->list);
2907         mutex_unlock(&lock);
2908
2909         cma_process_remove(cma_dev);
2910         kfree(cma_dev);
2911 }
2912
2913 static int cma_init(void)
2914 {
2915         int ret, low, high, remaining;
2916
2917         get_random_bytes(&next_port, sizeof next_port);
2918         inet_get_local_port_range(&low, &high);
2919         remaining = (high - low) + 1;
2920         next_port = ((unsigned int) next_port % remaining) + low;
2921
2922         cma_wq = create_singlethread_workqueue("rdma_cm");
2923         if (!cma_wq)
2924                 return -ENOMEM;
2925
2926         ib_sa_register_client(&sa_client);
2927         rdma_addr_register_client(&addr_client);
2928         register_netdevice_notifier(&cma_nb);
2929
2930         ret = ib_register_client(&cma_client);
2931         if (ret)
2932                 goto err;
2933         return 0;
2934
2935 err:
2936         unregister_netdevice_notifier(&cma_nb);
2937         rdma_addr_unregister_client(&addr_client);
2938         ib_sa_unregister_client(&sa_client);
2939         destroy_workqueue(cma_wq);
2940         return ret;
2941 }
2942
2943 static void cma_cleanup(void)
2944 {
2945         ib_unregister_client(&cma_client);
2946         unregister_netdevice_notifier(&cma_nb);
2947         rdma_addr_unregister_client(&addr_client);
2948         ib_sa_unregister_client(&sa_client);
2949         destroy_workqueue(cma_wq);
2950         idr_destroy(&sdp_ps);
2951         idr_destroy(&tcp_ps);
2952         idr_destroy(&udp_ps);
2953         idr_destroy(&ipoib_ps);
2954 }
2955
2956 module_init(cma_init);
2957 module_exit(cma_cleanup);