/* drivers/infiniband/core/cma.c (pandora-kernel.git, blob 9aba4ea949331a1b9b7584a1bba9aa24217d7bf9) */

/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This Software is licensed under one of the following licenses:
 *
 * 1) under the terms of the "Common Public License 1.0" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/cpl.php.
 *
 * 2) under the terms of the "The BSD License" a copy of which is
 *    available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/bsd-license.php.
 *
 * 3) under the terms of the "GNU General Public License (GPL) Version 2" a
 *    copy of which is available from the Open Source Initiative, see
 *    http://www.opensource.org/licenses/gpl-license.php.
 *
 * Licensee has the right to choose one of the above licenses.
 *
 * Redistributions of source code must retain the above copyright
 * notice and one of the license notices.
 *
 * Redistributions in binary form must reproduce both the above copyright
 * notice, one of the license notices in the documentation
 * and/or other materials provided with the distribution.
 *
 */

#include <linux/completion.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>

#include <net/tcp.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_MAX_CM_RETRIES 15

static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device);

static struct ib_client cma_client = {
	.name   = "cma",
	.add    = cma_add_one,
	.remove = cma_remove_one
};

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;
static DEFINE_IDR(sdp_ps);
static DEFINE_IDR(tcp_ps);

struct cma_device {
	struct list_head	list;
	struct ib_device	*device;
	__be64			node_guid;
	struct completion	comp;
	atomic_t		refcount;
	struct list_head	id_list;
};

enum cma_state {
	CMA_IDLE,
	CMA_ADDR_QUERY,
	CMA_ADDR_RESOLVED,
	CMA_ROUTE_QUERY,
	CMA_ROUTE_RESOLVED,
	CMA_CONNECT,
	CMA_DISCONNECT,
	CMA_ADDR_BOUND,
	CMA_LISTEN,
	CMA_DEVICE_REMOVAL,
	CMA_DESTROYING
};

struct rdma_bind_list {
	struct idr		*ps;
	struct hlist_head	owners;
	unsigned short		port;
};
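
/*
 * Note on the structure above: every rdma_id_private bound to the same
 * port in a given port space hangs off a single rdma_bind_list through
 * the owners hlist (see cma_bind_port() below); cma_release_port()
 * removes the list from its idr and frees it only when the last owner
 * goes away, so ports can be shared, e.g. by per-device listen ids.
 */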

/*
 * Device removal can occur at any time, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.
 */
struct rdma_id_private {
	struct rdma_cm_id	id;

	struct rdma_bind_list	*bind_list;
	struct hlist_node	node;
	struct list_head	list;
	struct list_head	listen_list;
	struct cma_device	*cma_dev;

	enum cma_state		state;
	spinlock_t		lock;
	struct completion	comp;
	atomic_t		refcount;
	wait_queue_head_t	wait_remove;
	atomic_t		dev_remove;

	int			backlog;
	int			timeout_ms;
	struct ib_sa_query	*query;
	int			query_id;
	union {
		struct ib_cm_id *ib;
		struct iw_cm_id *iw;
	} cm_id;

	u32			seq_num;
	u32			qp_num;
	enum ib_qp_type		qp_type;
	u8			srq;
};

struct cma_work {
	struct work_struct	work;
	struct rdma_id_private	*id;
	enum cma_state		old_state;
	enum cma_state		new_state;
	struct rdma_cm_event	event;
};

union cma_ip_addr {
	struct in6_addr ip6;
	struct {
		__u32 pad[3];
		__u32 addr;
	} ip4;
};

struct cma_hdr {
	u8 cma_version;
	u8 ip_version;  /* IP version: 7:4 */
	__u16 port;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hh {
	u8 bsdh[16];
	u8 sdp_version; /* Major version: 7:4 */
	u8 ip_version;  /* IP version: 7:4 */
	u8 sdp_specific1[10];
	__u16 port;
	__u16 sdp_specific2;
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;
};

struct sdp_hah {
	u8 bsdh[16];
	u8 sdp_version;
};

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2

static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum cma_state comp, enum cma_state exch)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return ret;
}

static enum cma_state cma_exch(struct rdma_id_private *id_priv,
			       enum cma_state exch)
{
	unsigned long flags;
	enum cma_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);
	return old;
}
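
/*
 * The three helpers above form a small locked state machine over
 * enum cma_state: cma_comp() tests the current state, cma_comp_exch()
 * is a compare-and-swap (advance only from an expected state), and
 * cma_exch() swaps unconditionally, returning the old state. Callers
 * such as rdma_listen() use, e.g., cma_comp_exch(id_priv,
 * CMA_ADDR_BOUND, CMA_LISTEN) so that concurrent transitions on the
 * same id cannot race past one another.
 */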

static inline u8 cma_get_ip_ver(struct cma_hdr *hdr)
{
	return hdr->ip_version >> 4;
}

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
{
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
}

static inline u8 sdp_get_majv(u8 sdp_version)
{
	return sdp_version >> 4;
}

static inline u8 sdp_get_ip_ver(struct sdp_hh *hh)
{
	return hh->ip_version >> 4;
}

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
{
	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
}
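
/*
 * Worked example of the nibble packing above: starting from a zeroed
 * header, cma_set_ip_ver(hdr, 4) leaves hdr->ip_version == 0x40, and
 * cma_get_ip_ver(hdr) then returns 4; the low nibble is masked and
 * preserved across the store.
 */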

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	atomic_inc(&cma_dev->refcount);
	id_priv->cma_dev = cma_dev;
	id_priv->id.device = cma_dev->device;
	list_add_tail(&id_priv->list, &cma_dev->id_list);
}

static inline void cma_deref_dev(struct cma_device *cma_dev)
{
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);
}

static void cma_detach_from_dev(struct rdma_id_private *id_priv)
{
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;
}

static int cma_acquire_dev(struct rdma_id_private *id_priv)
{
	enum rdma_node_type dev_type = id_priv->id.route.addr.dev_addr.dev_type;
	struct cma_device *cma_dev;
	union ib_gid gid;
	int ret = -ENODEV;

	switch (rdma_node_get_transport(dev_type)) {
	case RDMA_TRANSPORT_IB:
		ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
		break;
	case RDMA_TRANSPORT_IWARP:
		iw_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
		break;
	default:
		return -ENODEV;
	}

	list_for_each_entry(cma_dev, &dev_list, list) {
		ret = ib_find_cached_gid(cma_dev->device, &gid,
					 &id_priv->id.port_num, NULL);
		if (!ret) {
			cma_attach_to_dev(id_priv, cma_dev);
			break;
		}
	}
	return ret;
}

static void cma_deref_id(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);
}

static void cma_release_remove(struct rdma_id_private *id_priv)
{
	if (atomic_dec_and_test(&id_priv->dev_remove))
		wake_up(&id_priv->wait_remove);
}

struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps)
{
	struct rdma_id_private *id_priv;

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
	if (!id_priv)
		return ERR_PTR(-ENOMEM);

	id_priv->state = CMA_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.ps = ps;
	spin_lock_init(&id_priv->lock);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	init_waitqueue_head(&id_priv->wait_remove);
	atomic_set(&id_priv->dev_remove, 0);
	INIT_LIST_HEAD(&id_priv->listen_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);

	return &id_priv->id;
}
EXPORT_SYMBOL(rdma_create_id);
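
/*
 * A minimal active-side consumer sketch (not part of this file; the
 * identifiers my_handler, my_ctx, and dst are hypothetical). A kernel
 * ULP would typically drive a connection through this API roughly as
 * follows, reacting to events delivered to its handler:
 *
 *	static int my_handler(struct rdma_cm_id *id,
 *			      struct rdma_cm_event *event)
 *	{
 *		switch (event->event) {
 *		case RDMA_CM_EVENT_ADDR_RESOLVED:
 *			return rdma_resolve_route(id, 2000);
 *		case RDMA_CM_EVENT_ROUTE_RESOLVED:
 *			// allocate PD/QP, then call rdma_connect()
 *			return 0;
 *		default:
 *			return 0;	// nonzero destroys the id
 *		}
 *	}
 *
 *	id = rdma_create_id(my_handler, my_ctx, RDMA_PS_TCP);
 *	rdma_resolve_addr(id, NULL, (struct sockaddr *) &dst, 2000);
 */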

static int cma_init_ib_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	struct rdma_dev_addr *dev_addr;
	int ret;

	dev_addr = &id_priv->id.route.addr.dev_addr;
	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  ib_addr_get_pkey(dev_addr),
				  &qp_attr.pkey_index);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = id_priv->id.port_num;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS |
					  IB_QP_PKEY_INDEX | IB_QP_PORT);
}

static int cma_init_iw_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;

	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_ACCESS_FLAGS);
}

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct rdma_id_private *id_priv;
	struct ib_qp *qp;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)
		return -EINVAL;

	qp = ib_create_qp(pd, qp_init_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	switch (rdma_node_get_transport(id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = cma_init_ib_qp(id_priv, qp);
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = cma_init_iw_qp(id_priv, qp);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	if (ret)
		goto err;

	id->qp = qp;
	id_priv->qp_num = qp->qp_num;
	id_priv->qp_type = qp->qp_type;
	id_priv->srq = (qp->srq != NULL);
	return 0;
err:
	ib_destroy_qp(qp);
	return ret;
}
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
{
	ib_destroy_qp(id->qp);
}
EXPORT_SYMBOL(rdma_destroy_qp);

static int cma_modify_qp_rtr(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	if (!id->qp)
		return 0;

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	ret = ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
	if (ret)
		return ret;

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}

static int cma_modify_qp_rts(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	if (!id->qp)
		return 0;

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(id->qp, &qp_attr, qp_attr_mask);
}

static int cma_modify_qp_err(struct rdma_cm_id *id)
{
	struct ib_qp_attr qp_attr;

	if (!id->qp)
		return 0;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(id->qp, &qp_attr, IB_QP_STATE);
}
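
/*
 * The helpers above walk the standard IB QP state machine as the
 * connection handshake progresses: INIT -> RTR -> RTS, with the
 * attributes for each step filled in by rdma_init_qp_attr()
 * (cma_rep_recv() below performs RTR then RTS on receipt of a REP).
 * cma_modify_qp_err() parks the QP in the error state on failure so
 * outstanding work requests complete with a flush status.
 */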

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
					 qp_attr_mask);
		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
		break;
	case RDMA_TRANSPORT_IWARP:
		ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
					qp_attr_mask);
		break;
	default:
		ret = -ENOSYS;
		break;
	}

	return ret;
}
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return ZERONET(((struct sockaddr_in *) addr)->sin_addr.s_addr);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
			ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int cma_loopback_addr(struct sockaddr *addr)
{
	return LOOPBACK(((struct sockaddr_in *) addr)->sin_addr.s_addr);
}

static inline int cma_any_addr(struct sockaddr *addr)
{
	return cma_zero_addr(addr) || cma_loopback_addr(addr);
}

static inline int cma_any_port(struct sockaddr *addr)
{
	return !((struct sockaddr_in *) addr)->sin_port;
}

static int cma_get_net_info(void *hdr, enum rdma_port_space ps,
			    u8 *ip_ver, __u16 *port,
			    union cma_ip_addr **src, union cma_ip_addr **dst)
{
	switch (ps) {
	case RDMA_PS_SDP:
		if (sdp_get_majv(((struct sdp_hh *) hdr)->sdp_version) !=
		    SDP_MAJ_VERSION)
			return -EINVAL;

		*ip_ver = sdp_get_ip_ver(hdr);
		*port   = ((struct sdp_hh *) hdr)->port;
		*src    = &((struct sdp_hh *) hdr)->src_addr;
		*dst    = &((struct sdp_hh *) hdr)->dst_addr;
		break;
	default:
		if (((struct cma_hdr *) hdr)->cma_version != CMA_VERSION)
			return -EINVAL;

		*ip_ver = cma_get_ip_ver(hdr);
		*port   = ((struct cma_hdr *) hdr)->port;
		*src    = &((struct cma_hdr *) hdr)->src_addr;
		*dst    = &((struct cma_hdr *) hdr)->dst_addr;
		break;
	}

	if (*ip_ver != 4 && *ip_ver != 6)
		return -EINVAL;
	return 0;
}

static void cma_save_net_info(struct rdma_addr *addr,
			      struct rdma_addr *listen_addr,
			      u8 ip_ver, __u16 port,
			      union cma_ip_addr *src, union cma_ip_addr *dst)
{
	struct sockaddr_in *listen4, *ip4;
	struct sockaddr_in6 *listen6, *ip6;

	switch (ip_ver) {
	case 4:
		listen4 = (struct sockaddr_in *) &listen_addr->src_addr;
		ip4 = (struct sockaddr_in *) &addr->src_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = dst->ip4.addr;
		ip4->sin_port = listen4->sin_port;

		ip4 = (struct sockaddr_in *) &addr->dst_addr;
		ip4->sin_family = listen4->sin_family;
		ip4->sin_addr.s_addr = src->ip4.addr;
		ip4->sin_port = port;
		break;
	case 6:
		listen6 = (struct sockaddr_in6 *) &listen_addr->src_addr;
		ip6 = (struct sockaddr_in6 *) &addr->src_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = dst->ip6;
		ip6->sin6_port = listen6->sin6_port;

		ip6 = (struct sockaddr_in6 *) &addr->dst_addr;
		ip6->sin6_family = listen6->sin6_family;
		ip6->sin6_addr = src->ip6;
		ip6->sin6_port = port;
		break;
	default:
		break;
	}
}

static inline int cma_user_data_offset(enum rdma_port_space ps)
{
	switch (ps) {
	case RDMA_PS_SDP:
		return 0;
	default:
		return sizeof(struct cma_hdr);
	}
}
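
/*
 * Private-data layout, in short: for every port space except SDP the
 * CM private data begins with a struct cma_hdr carrying the version,
 * addresses, and port, so the user's payload starts sizeof(struct
 * cma_hdr) bytes in. SDP carries the addressing inside its own hello
 * header, so the user data offset is 0. cma_req_handler() below uses
 * this offset when handing private data up to the listener.
 */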

static int cma_notify_user(struct rdma_id_private *id_priv,
			   enum rdma_cm_event_type type, int status,
			   void *data, u8 data_len)
{
	struct rdma_cm_event event;

	event.event = type;
	event.status = status;
	event.private_data = data;
	event.private_data_len = data_len;

	return id_priv->id.event_handler(&id_priv->id, &event);
}

static void cma_cancel_route(struct rdma_id_private *id_priv)
{
	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (id_priv->query)
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);
		break;
	default:
		break;
	}
}

static inline int cma_internal_listen(struct rdma_id_private *id_priv)
{
	return (id_priv->state == CMA_LISTEN) && id_priv->cma_dev &&
	       cma_any_addr(&id_priv->id.route.addr.src_addr);
}

static void cma_destroy_listen(struct rdma_id_private *id_priv)
{
	cma_exch(id_priv, CMA_DESTROYING);

	if (id_priv->cma_dev) {
		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		cma_detach_from_dev(id_priv);
	}
	list_del(&id_priv->listen_list);

	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	kfree(id_priv);
}

static void cma_cancel_listens(struct rdma_id_private *id_priv)
{
	struct rdma_id_private *dev_id_priv;

	mutex_lock(&lock);
	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		cma_destroy_listen(dev_id_priv);
	}
	mutex_unlock(&lock);
}

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum cma_state state)
{
	switch (state) {
	case CMA_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
		break;
	case CMA_ROUTE_QUERY:
		cma_cancel_route(id_priv);
		break;
	case CMA_LISTEN:
		if (cma_any_addr(&id_priv->id.route.addr.src_addr) &&
		    !id_priv->cma_dev)
			cma_cancel_listens(id_priv);
		break;
	default:
		break;
	}
}

static void cma_release_port(struct rdma_id_private *id_priv)
{
	struct rdma_bind_list *bind_list = id_priv->bind_list;

	if (!bind_list)
		return;

	mutex_lock(&lock);
	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		idr_remove(bind_list->ps, bind_list->port);
		kfree(bind_list);
	}
	mutex_unlock(&lock);
}

void rdma_destroy_id(struct rdma_cm_id *id)
{
	struct rdma_id_private *id_priv;
	enum cma_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, CMA_DESTROYING);
	cma_cancel_operation(id_priv, state);

	mutex_lock(&lock);
	if (id_priv->cma_dev) {
		mutex_unlock(&lock);
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
				ib_destroy_cm_id(id_priv->cm_id.ib);
			break;
		case RDMA_TRANSPORT_IWARP:
			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
				iw_destroy_cm_id(id_priv->cm_id.iw);
			break;
		default:
			break;
		}
		mutex_lock(&lock);
		cma_detach_from_dev(id_priv);
	}
	mutex_unlock(&lock);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	kfree(id_priv->id.route.path_rec);
	kfree(id_priv);
}
EXPORT_SYMBOL(rdma_destroy_id);

static int cma_rep_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rtr(&id_priv->id);
	if (ret)
		goto reject;

	ret = cma_modify_qp_rts(&id_priv->id);
	if (ret)
		goto reject;

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(&id_priv->id);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_verify_rep(struct rdma_id_private *id_priv, void *data)
{
	if (id_priv->id.ps == RDMA_PS_SDP &&
	    sdp_get_majv(((struct sdp_hah *) data)->sdp_version) !=
	    SDP_MAJ_VERSION)
		return -EINVAL;

	return 0;
}

static int cma_rtu_recv(struct rdma_id_private *id_priv)
{
	int ret;

	ret = cma_modify_qp_rts(&id_priv->id);
	if (ret)
		goto reject;

	return 0;
reject:
	cma_modify_qp_err(&id_priv->id);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
		       NULL, 0, NULL, 0);
	return ret;
}

static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv = cm_id->context;
	enum rdma_cm_event_type event;
	u8 private_data_len = 0;
	int ret = 0, status = 0;

	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp(id_priv, CMA_CONNECT))
		goto out;

	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event = RDMA_CM_EVENT_UNREACHABLE;
		status = -ETIMEDOUT;
		break;
	case IB_CM_REP_RECEIVED:
		status = cma_verify_rep(id_priv, ib_event->private_data);
		if (status)
			event = RDMA_CM_EVENT_CONNECT_ERROR;
		else if (id_priv->id.qp && id_priv->id.ps != RDMA_PS_SDP) {
			status = cma_rep_recv(id_priv);
			event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
					 RDMA_CM_EVENT_ESTABLISHED;
		} else
			event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
		break;
	case IB_CM_RTU_RECEIVED:
		status = cma_rtu_recv(id_priv);
		event = status ? RDMA_CM_EVENT_CONNECT_ERROR :
				 RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IB_CM_DREQ_ERROR:
		status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
			goto out;
		event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IB_CM_TIMEWAIT_EXIT:
	case IB_CM_MRA_RECEIVED:
		/* ignore event */
		goto out;
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(&id_priv->id);
		status = ib_event->param.rej_rcvd.reason;
		event = RDMA_CM_EVENT_REJECTED;
		private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		break;
	default:
		printk(KERN_ERR "RDMA CMA: unexpected IB CM event: %d\n",
		       ib_event->event);
		goto out;
	}

	ret = cma_notify_user(id_priv, event, status, ib_event->private_data,
			      private_data_len);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
out:
	cma_release_remove(id_priv);
	return ret;
}

static struct rdma_id_private *cma_new_id(struct rdma_cm_id *listen_id,
					  struct ib_cm_event *ib_event)
{
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	union cma_ip_addr *src, *dst;
	__u16 port;
	u8 ip_ver;

	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
			     &ip_ver, &port, &src, &dst))
		goto err;

	id = rdma_create_id(listen_id->event_handler, listen_id->context,
			    listen_id->ps);
	if (IS_ERR(id))
		goto err;

	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
			  ip_ver, port, src, dst);

	rt = &id->route;
	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
			       GFP_KERNEL);
	if (!rt->path_rec)
		goto destroy_id;

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

	ib_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
	ib_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
	ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
	rt->addr.dev_addr.dev_type = RDMA_NODE_IB_CA;

	id_priv = container_of(id, struct rdma_id_private, id);
	id_priv->state = CMA_CONNECT;
	return id_priv;

destroy_id:
	rdma_destroy_id(id);
err:
	return NULL;
}

static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
{
	struct rdma_id_private *listen_id, *conn_id;
	int offset, ret;

	listen_id = cm_id->context;
	atomic_inc(&listen_id->dev_remove);
	if (!cma_comp(listen_id, CMA_LISTEN)) {
		ret = -ECONNABORTED;
		goto out;
	}

	conn_id = cma_new_id(&listen_id->id, ib_event);
	if (!conn_id) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_inc(&conn_id->dev_remove);
	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		ret = -ENODEV;
		cma_exch(conn_id, CMA_DESTROYING);
		cma_release_remove(conn_id);
		rdma_destroy_id(&conn_id->id);
		goto out;
	}

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	offset = cma_user_data_offset(listen_id->id.ps);
	ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
			      ib_event->private_data + offset,
			      IB_CM_REQ_PRIVATE_DATA_SIZE - offset);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		conn_id->cm_id.ib = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		cma_release_remove(conn_id);
		rdma_destroy_id(&conn_id->id);
	}
out:
	cma_release_remove(listen_id);
	return ret;
}

static __be64 cma_get_service_id(enum rdma_port_space ps, struct sockaddr *addr)
{
	return cpu_to_be64(((u64)ps << 16) +
	       be16_to_cpu(((struct sockaddr_in *) addr)->sin_port));
}
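
/*
 * Worked example of the service-ID mapping above (a sketch; the
 * port-space constants live in <rdma/rdma_cm.h>, where RDMA_PS_TCP is
 * 0x0106 in kernels of this vintage): binding RDMA_PS_TCP to port
 * 5000 (0x1388) yields (0x0106ULL << 16) + 0x1388 = 0x01061388, sent
 * on the wire in big-endian order via cpu_to_be64().
 */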

static void cma_set_compare_data(enum rdma_port_space ps, struct sockaddr *addr,
				 struct ib_cm_compare_data *compare)
{
	struct cma_hdr *cma_data, *cma_mask;
	struct sdp_hh *sdp_data, *sdp_mask;
	__u32 ip4_addr;
	struct in6_addr ip6_addr;

	memset(compare, 0, sizeof *compare);
	cma_data = (void *) compare->data;
	cma_mask = (void *) compare->mask;
	sdp_data = (void *) compare->data;
	sdp_mask = (void *) compare->mask;

	switch (addr->sa_family) {
	case AF_INET:
		ip4_addr = ((struct sockaddr_in *) addr)->sin_addr.s_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 4);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip4.addr = ip4_addr;
			sdp_mask->dst_addr.ip4.addr = ~0;
		} else {
			cma_set_ip_ver(cma_data, 4);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip4.addr = ip4_addr;
			cma_mask->dst_addr.ip4.addr = ~0;
		}
		break;
	case AF_INET6:
		ip6_addr = ((struct sockaddr_in6 *) addr)->sin6_addr;
		if (ps == RDMA_PS_SDP) {
			sdp_set_ip_ver(sdp_data, 6);
			sdp_set_ip_ver(sdp_mask, 0xF);
			sdp_data->dst_addr.ip6 = ip6_addr;
			memset(&sdp_mask->dst_addr.ip6, 0xFF,
			       sizeof sdp_mask->dst_addr.ip6);
		} else {
			cma_set_ip_ver(cma_data, 6);
			cma_set_ip_ver(cma_mask, 0xF);
			cma_data->dst_addr.ip6 = ip6_addr;
			memset(&cma_mask->dst_addr.ip6, 0xFF,
			       sizeof cma_mask->dst_addr.ip6);
		}
		break;
	default:
		break;
	}
}

static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
	struct rdma_id_private *id_priv = iw_id->context;
	enum rdma_cm_event_type event = 0;
	struct sockaddr_in *sin;
	int ret = 0;

	atomic_inc(&id_priv->dev_remove);

	switch (iw_event->event) {
	case IW_CM_EVENT_CLOSE:
		event = RDMA_CM_EVENT_DISCONNECTED;
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
		*sin = iw_event->local_addr;
		sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
		*sin = iw_event->remote_addr;
		if (iw_event->status)
			event = RDMA_CM_EVENT_REJECTED;
		else
			event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	case IW_CM_EVENT_ESTABLISHED:
		event = RDMA_CM_EVENT_ESTABLISHED;
		break;
	default:
		BUG_ON(1);
	}

	ret = cma_notify_user(id_priv, event, iw_event->status,
			      iw_event->private_data,
			      iw_event->private_data_len);
	if (ret) {
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.iw = NULL;
		cma_exch(id_priv, CMA_DESTROYING);
		cma_release_remove(id_priv);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}

	cma_release_remove(id_priv);
	return ret;
}

static int iw_conn_req_handler(struct iw_cm_id *cm_id,
			       struct iw_cm_event *iw_event)
{
	struct rdma_cm_id *new_cm_id;
	struct rdma_id_private *listen_id, *conn_id;
	struct sockaddr_in *sin;
	struct net_device *dev = NULL;
	int ret;

	listen_id = cm_id->context;
	atomic_inc(&listen_id->dev_remove);
	if (!cma_comp(listen_id, CMA_LISTEN)) {
		ret = -ECONNABORTED;
		goto out;
	}

	/* Create a new RDMA id for the new IW CM ID */
	new_cm_id = rdma_create_id(listen_id->id.event_handler,
				   listen_id->id.context,
				   RDMA_PS_TCP);
	/* rdma_create_id() returns ERR_PTR(), never NULL, on failure. */
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto out;
	}
	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
	atomic_inc(&conn_id->dev_remove);
	conn_id->state = CMA_CONNECT;

	dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
	if (!dev) {
		ret = -EADDRNOTAVAIL;
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}
	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
	if (ret) {
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	mutex_lock(&lock);
	ret = cma_acquire_dev(conn_id);
	mutex_unlock(&lock);
	if (ret) {
		cma_release_remove(conn_id);
		rdma_destroy_id(new_cm_id);
		goto out;
	}

	conn_id->cm_id.iw = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_iw_handler;

	sin = (struct sockaddr_in *) &new_cm_id->route.addr.src_addr;
	*sin = iw_event->local_addr;
	sin = (struct sockaddr_in *) &new_cm_id->route.addr.dst_addr;
	*sin = iw_event->remote_addr;

	ret = cma_notify_user(conn_id, RDMA_CM_EVENT_CONNECT_REQUEST, 0,
			      iw_event->private_data,
			      iw_event->private_data_len);
	if (ret) {
		/* User wants to destroy the CM ID */
		conn_id->cm_id.iw = NULL;
		cma_exch(conn_id, CMA_DESTROYING);
		cma_release_remove(conn_id);
		rdma_destroy_id(&conn_id->id);
	}

out:
	if (dev)
		dev_put(dev);
	cma_release_remove(listen_id);
	return ret;
}

static int cma_ib_listen(struct rdma_id_private *id_priv)
{
	struct ib_cm_compare_data compare_data;
	struct sockaddr *addr;
	__be64 svc_id;
	int ret;

	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.ib))
		return PTR_ERR(id_priv->cm_id.ib);

	addr = &id_priv->id.route.addr.src_addr;
	svc_id = cma_get_service_id(id_priv->id.ps, addr);
	if (cma_any_addr(addr))
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, NULL);
	else {
		cma_set_compare_data(id_priv->id.ps, addr, &compare_data);
		ret = ib_cm_listen(id_priv->cm_id.ib, svc_id, 0, &compare_data);
	}

	if (ret) {
		ib_destroy_cm_id(id_priv->cm_id.ib);
		id_priv->cm_id.ib = NULL;
	}

	return ret;
}

static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
	int ret;
	struct sockaddr_in *sin;

	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
					    iw_conn_req_handler,
					    id_priv);
	if (IS_ERR(id_priv->cm_id.iw))
		return PTR_ERR(id_priv->cm_id.iw);

	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
	id_priv->cm_id.iw->local_addr = *sin;

	ret = iw_cm_listen(id_priv->cm_id.iw, backlog);

	if (ret) {
		iw_destroy_cm_id(id_priv->cm_id.iw);
		id_priv->cm_id.iw = NULL;
	}

	return ret;
}

static int cma_listen_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	struct rdma_id_private *id_priv = id->context;

	id->context = id_priv->id.context;
	id->event_handler = id_priv->id.event_handler;
	return id_priv->id.event_handler(id, event);
}

static void cma_listen_on_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
{
	struct rdma_id_private *dev_id_priv;
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
	if (IS_ERR(id))
		return;

	dev_id_priv = container_of(id, struct rdma_id_private, id);

	dev_id_priv->state = CMA_ADDR_BOUND;
	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
	       ip_addr_size(&id_priv->id.route.addr.src_addr));

	cma_attach_to_dev(dev_id_priv, cma_dev);
	list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);

	ret = rdma_listen(id, id_priv->backlog);
	if (ret)
		goto err;

	return;
err:
	cma_destroy_listen(dev_id_priv);
}

static void cma_listen_on_all(struct rdma_id_private *id_priv)
{
	struct cma_device *cma_dev;

	mutex_lock(&lock);
	list_add_tail(&id_priv->list, &listen_any_list);
	list_for_each_entry(cma_dev, &dev_list, list)
		cma_listen_on_dev(id_priv, cma_dev);
	mutex_unlock(&lock);
}

static int cma_bind_any(struct rdma_cm_id *id, sa_family_t af)
{
	struct sockaddr_in addr_in;

	memset(&addr_in, 0, sizeof addr_in);
	addr_in.sin_family = af;
	return rdma_bind_addr(id, (struct sockaddr *) &addr_in);
}

int rdma_listen(struct rdma_cm_id *id, int backlog)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id_priv->state == CMA_IDLE) {
		ret = cma_bind_any(id, AF_INET);
		if (ret)
			return ret;
	}

	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
		return -EINVAL;

	id_priv->backlog = backlog;
	if (id->device) {
		switch (rdma_node_get_transport(id->device->node_type)) {
		case RDMA_TRANSPORT_IB:
			ret = cma_ib_listen(id_priv);
			if (ret)
				goto err;
			break;
		case RDMA_TRANSPORT_IWARP:
			ret = cma_iw_listen(id_priv, backlog);
			if (ret)
				goto err;
			break;
		default:
			ret = -ENOSYS;
			goto err;
		}
	} else
		cma_listen_on_all(id_priv);

	return 0;
err:
	id_priv->backlog = 0;
	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
	return ret;
}
EXPORT_SYMBOL(rdma_listen);
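
/*
 * A minimal passive-side sketch (caller identifiers my_listen_handler,
 * my_ctx, and my_addr are hypothetical, not part of this file): a
 * listener binds, listens, and then accepts or rejects each
 * RDMA_CM_EVENT_CONNECT_REQUEST delivered to its handler:
 *
 *	id = rdma_create_id(my_listen_handler, my_ctx, RDMA_PS_TCP);
 *	rdma_bind_addr(id, (struct sockaddr *) &my_addr);
 *	rdma_listen(id, 10);
 *
 * If the id was never bound, rdma_listen() above binds it to the IPv4
 * wildcard address, which listens across all attached RDMA devices
 * via cma_listen_on_all().
 */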

static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
			      void *context)
{
	struct cma_work *work = context;
	struct rdma_route *route;

	route = &work->id->id.route;

	if (!status) {
		route->num_paths = 1;
		*route->path_rec = *path_rec;
	} else {
		work->old_state = CMA_ROUTE_QUERY;
		work->new_state = CMA_ADDR_RESOLVED;
		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
		work->event.status = status;
	}

	queue_work(cma_wq, &work->work);
}

static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
			      struct cma_work *work)
{
	struct rdma_dev_addr *addr = &id_priv->id.route.addr.dev_addr;
	struct ib_sa_path_rec path_rec;

	memset(&path_rec, 0, sizeof path_rec);
	ib_addr_get_sgid(addr, &path_rec.sgid);
	ib_addr_get_dgid(addr, &path_rec.dgid);
	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
	path_rec.numb_path = 1;

	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
				id_priv->id.port_num, &path_rec,
				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
				IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH,
				timeout_ms, GFP_KERNEL,
				cma_query_handler, work, &id_priv->query);

	return (id_priv->query_id < 0) ? id_priv->query_id : 0;
}

static void cma_work_handler(void *data)
{
	struct cma_work *work = data;
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	atomic_inc(&id_priv->dev_remove);
	if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
		goto out;

	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}
out:
	cma_release_remove(id_priv);
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct rdma_route *route = &id_priv->id.route;
	struct cma_work *work;
	int ret;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler, work);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;

	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
	if (!route->path_rec) {
		ret = -ENOMEM;
		goto err1;
	}

	ret = cma_query_ib_route(id_priv, timeout_ms, work);
	if (ret)
		goto err2;

	return 0;
err2:
	kfree(route->path_rec);
	route->path_rec = NULL;
err1:
	kfree(work);
	return ret;
}

int rdma_set_ib_paths(struct rdma_cm_id *id,
		      struct ib_sa_path_rec *path_rec, int num_paths)
{
	struct rdma_id_private *id_priv;
	int ret;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
		return -EINVAL;

	id->route.path_rec = kmalloc(sizeof *path_rec * num_paths, GFP_KERNEL);
	if (!id->route.path_rec) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(id->route.path_rec, path_rec, sizeof *path_rec * num_paths);
	return 0;
err:
	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
	return ret;
}
EXPORT_SYMBOL(rdma_set_ib_paths);

static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
{
	struct cma_work *work;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (!work)
		return -ENOMEM;

	work->id = id_priv;
	INIT_WORK(&work->work, cma_work_handler, work);
	work->old_state = CMA_ROUTE_QUERY;
	work->new_state = CMA_ROUTE_RESOLVED;
	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	queue_work(cma_wq, &work->work);
	return 0;
}
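
/*
 * Note the asymmetry above: IB route resolution issues a real SA
 * path-record query (cma_query_ib_route()), while the iWARP variant
 * has no fabric query to make, so it simply queues a work item that
 * reports RDMA_CM_EVENT_ROUTE_RESOLVED. Both paths deliver the event
 * from cma_wq, so callers see a uniform asynchronous interface.
 */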
1441
1442 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
1443 {
1444         struct rdma_id_private *id_priv;
1445         int ret;
1446
1447         id_priv = container_of(id, struct rdma_id_private, id);
1448         if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
1449                 return -EINVAL;
1450
1451         atomic_inc(&id_priv->refcount);
1452         switch (rdma_node_get_transport(id->device->node_type)) {
1453         case RDMA_TRANSPORT_IB:
1454                 ret = cma_resolve_ib_route(id_priv, timeout_ms);
1455                 break;
1456         case RDMA_TRANSPORT_IWARP:
1457                 ret = cma_resolve_iw_route(id_priv, timeout_ms);
1458                 break;
1459         default:
1460                 ret = -ENOSYS;
1461                 break;
1462         }
1463         if (ret)
1464                 goto err;
1465
1466         return 0;
1467 err:
1468         cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
1469         cma_deref_id(id_priv);
1470         return ret;
1471 }
1472 EXPORT_SYMBOL(rdma_resolve_route);
1473
1474 static int cma_bind_loopback(struct rdma_id_private *id_priv)
1475 {
1476         struct cma_device *cma_dev;
1477         struct ib_port_attr port_attr;
1478         union ib_gid gid;
1479         u16 pkey;
1480         int ret;
1481         u8 p;
1482
1483         mutex_lock(&lock);
1484         if (list_empty(&dev_list)) {
1485                 ret = -ENODEV;
1486                 goto out;
1487         }
1488         list_for_each_entry(cma_dev, &dev_list, list)
1489                 for (p = 1; p <= cma_dev->device->phys_port_cnt; ++p)
1490                         if (!ib_query_port(cma_dev->device, p, &port_attr) &&
1491                             port_attr.state == IB_PORT_ACTIVE)
1492                                 goto port_found;
1493
1494         p = 1;
1495         cma_dev = list_entry(dev_list.next, struct cma_device, list);
1496
1497 port_found:
1498         ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
1499         if (ret)
1500                 goto out;
1501
1502         ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
1503         if (ret)
1504                 goto out;
1505
1506         ib_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1507         ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
1508         id_priv->id.port_num = p;
1509         cma_attach_to_dev(id_priv, cma_dev);
1510 out:
1511         mutex_unlock(&lock);
1512         return ret;
1513 }
1514
1515 static void addr_handler(int status, struct sockaddr *src_addr,
1516                          struct rdma_dev_addr *dev_addr, void *context)
1517 {
1518         struct rdma_id_private *id_priv = context;
1519         enum rdma_cm_event_type event;
1520
1521         atomic_inc(&id_priv->dev_remove);
1522
1523         /*
1524          * Grab mutex to block rdma_destroy_id() from removing the device while
1525          * we're trying to acquire it.
1526          */
1527         mutex_lock(&lock);
1528         if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
1529                 mutex_unlock(&lock);
1530                 goto out;
1531         }
1532
1533         if (!status && !id_priv->cma_dev)
1534                 status = cma_acquire_dev(id_priv);
1535         mutex_unlock(&lock);
1536
1537         if (status) {
1538                 if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
1539                         goto out;
1540                 event = RDMA_CM_EVENT_ADDR_ERROR;
1541         } else {
1542                 memcpy(&id_priv->id.route.addr.src_addr, src_addr,
1543                        ip_addr_size(src_addr));
1544                 event = RDMA_CM_EVENT_ADDR_RESOLVED;
1545         }
1546
1547         if (cma_notify_user(id_priv, event, status, NULL, 0)) {
1548                 cma_exch(id_priv, CMA_DESTROYING);
1549                 cma_release_remove(id_priv);
1550                 cma_deref_id(id_priv);
1551                 rdma_destroy_id(&id_priv->id);
1552                 return;
1553         }
1554 out:
1555         cma_release_remove(id_priv);
1556         cma_deref_id(id_priv);
1557 }
1558
1559 static int cma_resolve_loopback(struct rdma_id_private *id_priv)
1560 {
1561         struct cma_work *work;
1562         struct sockaddr_in *src_in, *dst_in;
1563         union ib_gid gid;
1564         int ret;
1565
1566         work = kzalloc(sizeof *work, GFP_KERNEL);
1567         if (!work)
1568                 return -ENOMEM;
1569
1570         if (!id_priv->cma_dev) {
1571                 ret = cma_bind_loopback(id_priv);
1572                 if (ret)
1573                         goto err;
1574         }
1575
1576         ib_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
1577         ib_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
1578
1579         if (cma_zero_addr(&id_priv->id.route.addr.src_addr)) {
1580                 src_in = (struct sockaddr_in *)&id_priv->id.route.addr.src_addr;
1581                 dst_in = (struct sockaddr_in *)&id_priv->id.route.addr.dst_addr;
1582                 src_in->sin_family = dst_in->sin_family;
1583                 src_in->sin_addr.s_addr = dst_in->sin_addr.s_addr;
1584         }
1585
1586         work->id = id_priv;
1587         INIT_WORK(&work->work, cma_work_handler, work);
1588         work->old_state = CMA_ADDR_QUERY;
1589         work->new_state = CMA_ADDR_RESOLVED;
1590         work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
1591         queue_work(cma_wq, &work->work);
1592         return 0;
1593 err:
1594         kfree(work);
1595         return ret;
1596 }
1597
1598 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1599                          struct sockaddr *dst_addr)
1600 {
1601         if (src_addr && src_addr->sa_family)
1602                 return rdma_bind_addr(id, src_addr);
1603         else
1604                 return cma_bind_any(id, dst_addr->sa_family);
1605 }
1606
1607 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
1608                       struct sockaddr *dst_addr, int timeout_ms)
1609 {
1610         struct rdma_id_private *id_priv;
1611         int ret;
1612
1613         id_priv = container_of(id, struct rdma_id_private, id);
1614         if (id_priv->state == CMA_IDLE) {
1615                 ret = cma_bind_addr(id, src_addr, dst_addr);
1616                 if (ret)
1617                         return ret;
1618         }
1619
1620         if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
1621                 return -EINVAL;
1622
1623         atomic_inc(&id_priv->refcount);
1624         memcpy(&id->route.addr.dst_addr, dst_addr, ip_addr_size(dst_addr));
1625         if (cma_any_addr(dst_addr))
1626                 ret = cma_resolve_loopback(id_priv);
1627         else
1628                 ret = rdma_resolve_ip(&addr_client, &id->route.addr.src_addr,
1629                                       dst_addr, &id->route.addr.dev_addr,
1630                                       timeout_ms, addr_handler, id_priv);
1631         if (ret)
1632                 goto err;
1633
1634         return 0;
1635 err:
1636         cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
1637         cma_deref_id(id_priv);
1638         return ret;
1639 }
1640 EXPORT_SYMBOL(rdma_resolve_addr);
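
/*
 * Illustrative sketch (editor's addition, not part of the original
 * source): how a hypothetical kernel ULP might drive the call above.
 * Passing a NULL src_addr lets cma_bind_addr() choose the local address,
 * and the ADDR_RESOLVED/ADDR_ERROR events arrive via addr_handler().
 */
#if 0
static int example_cma_handler(struct rdma_cm_id *id,
                               struct rdma_cm_event *event)
{
        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
                /* A local device and port are bound; look up a route. */
                return rdma_resolve_route(id, 2000);
        case RDMA_CM_EVENT_ADDR_ERROR:
                return event->status;   /* non-zero: the CMA destroys the id */
        default:
                return 0;
        }
}

static int example_resolve(struct sockaddr_in *dst)
{
        struct rdma_cm_id *id;

        id = rdma_create_id(example_cma_handler, NULL, RDMA_PS_TCP);
        if (IS_ERR(id))
                return PTR_ERR(id);

        return rdma_resolve_addr(id, NULL, (struct sockaddr *) dst, 2000);
}
#endif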
1641
1642 static void cma_bind_port(struct rdma_bind_list *bind_list,
1643                           struct rdma_id_private *id_priv)
1644 {
1645         struct sockaddr_in *sin;
1646
1647         sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1648         sin->sin_port = htons(bind_list->port);
1649         id_priv->bind_list = bind_list;
1650         hlist_add_head(&id_priv->node, &bind_list->owners);
1651 }
1652
1653 static int cma_alloc_port(struct idr *ps, struct rdma_id_private *id_priv,
1654                           unsigned short snum)
1655 {
1656         struct rdma_bind_list *bind_list;
1657         int port, start, ret;
1658
1659         bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
1660         if (!bind_list)
1661                 return -ENOMEM;
1662
1663         start = snum ? snum : sysctl_local_port_range[0];
1664
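        /*
         * idr_get_new_above() fails with -EAGAIN when its preallocated
         * nodes run out; idr_pre_get() replenishes them and we retry.
         */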
1665         do {
1666                 ret = idr_get_new_above(ps, bind_list, start, &port);
1667         } while ((ret == -EAGAIN) && idr_pre_get(ps, GFP_KERNEL));
1668
1669         if (ret)
1670                 goto err;
1671
1672         if ((snum && port != snum) ||
1673             (!snum && port > sysctl_local_port_range[1])) {
1674                 idr_remove(ps, port);
1675                 ret = -EADDRNOTAVAIL;
1676                 goto err;
1677         }
1678
1679         bind_list->ps = ps;
1680         bind_list->port = (unsigned short) port;
1681         cma_bind_port(bind_list, id_priv);
1682         return 0;
1683 err:
1684         kfree(bind_list);
1685         return ret;
1686 }
1687
1688 static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
1689 {
1690         struct rdma_id_private *cur_id;
1691         struct sockaddr_in *sin, *cur_sin;
1692         struct rdma_bind_list *bind_list;
1693         struct hlist_node *node;
1694         unsigned short snum;
1695
1696         sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1697         snum = ntohs(sin->sin_port);
1698         if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
1699                 return -EACCES;
1700
1701         bind_list = idr_find(ps, snum);
1702         if (!bind_list)
1703                 return cma_alloc_port(ps, id_priv, snum);
1704
1705         /*
1706          * Binding to the wildcard (any) address is not supported if anyone
1707          * is already bound to a specific address on the same port.
1708          */
1709         if (cma_any_addr(&id_priv->id.route.addr.src_addr))
1710                 return -EADDRNOTAVAIL;
1711
1712         hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
1713                 if (cma_any_addr(&cur_id->id.route.addr.src_addr))
1714                         return -EADDRNOTAVAIL;
1715
1716                 cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
1717                 if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
1718                         return -EADDRINUSE;
1719         }
1720
1721         cma_bind_port(bind_list, id_priv);
1722         return 0;
1723 }
1724
1725 static int cma_get_port(struct rdma_id_private *id_priv)
1726 {
1727         struct idr *ps;
1728         int ret;
1729
1730         switch (id_priv->id.ps) {
1731         case RDMA_PS_SDP:
1732                 ps = &sdp_ps;
1733                 break;
1734         case RDMA_PS_TCP:
1735                 ps = &tcp_ps;
1736                 break;
1737         default:
1738                 return -EPROTONOSUPPORT;
1739         }
1740
1741         mutex_lock(&lock);
1742         if (cma_any_port(&id_priv->id.route.addr.src_addr))
1743                 ret = cma_alloc_port(ps, id_priv, 0);
1744         else
1745                 ret = cma_use_port(ps, id_priv);
1746         mutex_unlock(&lock);
1747
1748         return ret;
1749 }
1750
1751 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
1752 {
1753         struct rdma_id_private *id_priv;
1754         int ret;
1755
1756         if (addr->sa_family != AF_INET)
1757                 return -EAFNOSUPPORT;
1758
1759         id_priv = container_of(id, struct rdma_id_private, id);
1760         if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
1761                 return -EINVAL;
1762
1763         if (!cma_any_addr(addr)) {
1764                 ret = rdma_translate_ip(addr, &id->route.addr.dev_addr);
1765                 if (ret)
1766                         goto err1;
1767
1768                 mutex_lock(&lock);
1769                 ret = cma_acquire_dev(id_priv);
1770                 mutex_unlock(&lock);
1771                 if (ret)
1772                         goto err1;
1773         }
1774
1775         memcpy(&id->route.addr.src_addr, addr, ip_addr_size(addr));
1776         ret = cma_get_port(id_priv);
1777         if (ret)
1778                 goto err2;
1779
1780         return 0;
1781 err2:
1782         if (!cma_any_addr(addr)) {
1783                 mutex_lock(&lock);
1784                 cma_detach_from_dev(id_priv);
1785                 mutex_unlock(&lock);
1786         }
1787 err1:
1788         cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
1789         return ret;
1790 }
1791 EXPORT_SYMBOL(rdma_bind_addr);
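
/*
 * Illustrative sketch (editor's addition): a hypothetical passive side
 * binding to an IPv4 port (the only family accepted above) and then
 * listening.  rdma_listen() belongs to this same rdma_cm API; the port
 * number is made up.
 */
#if 0
static int example_listen(rdma_cm_event_handler handler)
{
        struct sockaddr_in addr = {
                .sin_family      = AF_INET,
                .sin_addr.s_addr = htonl(INADDR_ANY),   /* wildcard bind */
                .sin_port        = htons(18515),
        };
        struct rdma_cm_id *id;
        int ret;

        id = rdma_create_id(handler, NULL, RDMA_PS_TCP);
        if (IS_ERR(id))
                return PTR_ERR(id);

        ret = rdma_bind_addr(id, (struct sockaddr *) &addr);
        if (!ret)
                ret = rdma_listen(id, 0);
        if (ret)
                rdma_destroy_id(id);
        return ret;
}
#endif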
1792
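/*
 * Build the private-data header that cma_connect_ib() sends with the CM
 * REQ: an SDP hello header for RDMA_PS_SDP, otherwise a struct cma_hdr
 * (IPv4 only at this point).
 */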
1793 static int cma_format_hdr(void *hdr, enum rdma_port_space ps,
1794                           struct rdma_route *route)
1795 {
1796         struct sockaddr_in *src4, *dst4;
1797         struct cma_hdr *cma_hdr;
1798         struct sdp_hh *sdp_hdr;
1799
1800         src4 = (struct sockaddr_in *) &route->addr.src_addr;
1801         dst4 = (struct sockaddr_in *) &route->addr.dst_addr;
1802
1803         switch (ps) {
1804         case RDMA_PS_SDP:
1805                 sdp_hdr = hdr;
1806                 if (sdp_get_majv(sdp_hdr->sdp_version) != SDP_MAJ_VERSION)
1807                         return -EINVAL;
1808                 sdp_set_ip_ver(sdp_hdr, 4);
1809                 sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
1810                 sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
1811                 sdp_hdr->port = src4->sin_port;
1812                 break;
1813         default:
1814                 cma_hdr = hdr;
1815                 cma_hdr->cma_version = CMA_VERSION;
1816                 cma_set_ip_ver(cma_hdr, 4);
1817                 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
1818                 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
1819                 cma_hdr->port = src4->sin_port;
1820                 break;
1821         }
1822         return 0;
1823 }
1824
1825 static int cma_connect_ib(struct rdma_id_private *id_priv,
1826                           struct rdma_conn_param *conn_param)
1827 {
1828         struct ib_cm_req_param req;
1829         struct rdma_route *route;
1830         void *private_data;
1831         int offset, ret;
1832
1833         memset(&req, 0, sizeof req);
1834         offset = cma_user_data_offset(id_priv->id.ps);
1835         req.private_data_len = offset + conn_param->private_data_len;
1836         private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
1837         if (!private_data)
1838                 return -ENOMEM;
1839
1840         if (conn_param->private_data && conn_param->private_data_len)
1841                 memcpy(private_data + offset, conn_param->private_data,
1842                        conn_param->private_data_len);
1843
1844         id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
1845                                             id_priv);
1846         if (IS_ERR(id_priv->cm_id.ib)) {
1847                 ret = PTR_ERR(id_priv->cm_id.ib);
1848                 goto out;
1849         }
1850
1851         route = &id_priv->id.route;
1852         ret = cma_format_hdr(private_data, id_priv->id.ps, route);
1853         if (ret)
1854                 goto out;
1855         req.private_data = private_data;
1856
1857         req.primary_path = &route->path_rec[0];
1858         if (route->num_paths == 2)
1859                 req.alternate_path = &route->path_rec[1];
1860
1861         req.service_id = cma_get_service_id(id_priv->id.ps,
1862                                             &route->addr.dst_addr);
1863         req.qp_num = id_priv->qp_num;
1864         req.qp_type = id_priv->qp_type;
1865         req.starting_psn = id_priv->seq_num;
1866         req.responder_resources = conn_param->responder_resources;
1867         req.initiator_depth = conn_param->initiator_depth;
1868         req.flow_control = conn_param->flow_control;
1869         req.retry_count = conn_param->retry_count;
1870         req.rnr_retry_count = conn_param->rnr_retry_count;
1871         req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
1872         req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
1873         req.max_cm_retries = CMA_MAX_CM_RETRIES;
1874         req.srq = id_priv->srq ? 1 : 0;
1875
1876         ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
1877 out:
1878         if (ret && !IS_ERR(id_priv->cm_id.ib)) {
1879                 ib_destroy_cm_id(id_priv->cm_id.ib);
1880                 id_priv->cm_id.ib = NULL;
1881         }
1882
1883         kfree(private_data);
1884         return ret;
1885 }
1886
1887 static int cma_connect_iw(struct rdma_id_private *id_priv,
1888                           struct rdma_conn_param *conn_param)
1889 {
1890         struct iw_cm_id *cm_id;
1891         struct sockaddr_in *sin;
1892         int ret;
1893         struct iw_cm_conn_param iw_param;
1894
1895         cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
1896         if (IS_ERR(cm_id)) {
1897                 ret = PTR_ERR(cm_id);
1898                 goto out;
1899         }
1900
1901         id_priv->cm_id.iw = cm_id;
1902
1903         sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
1904         cm_id->local_addr = *sin;
1905
1906         sin = (struct sockaddr_in *) &id_priv->id.route.addr.dst_addr;
1907         cm_id->remote_addr = *sin;
1908
1909         ret = cma_modify_qp_rtr(&id_priv->id);
1910         if (ret)
1911                 goto out;
1912
1913         iw_param.ord = conn_param->initiator_depth;
1914         iw_param.ird = conn_param->responder_resources;
1915         iw_param.private_data = conn_param->private_data;
1916         iw_param.private_data_len = conn_param->private_data_len;
1917         if (id_priv->id.qp)
1918                 iw_param.qpn = id_priv->qp_num;
1919         else
1920                 iw_param.qpn = conn_param->qp_num;
1921         ret = iw_cm_connect(cm_id, &iw_param);
1922 out:
1923         if (ret && !IS_ERR(cm_id)) {
1924                 iw_destroy_cm_id(cm_id);
1925                 id_priv->cm_id.iw = NULL;
1926         }
1927         return ret;
1928 }
1929
1930 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
1931 {
1932         struct rdma_id_private *id_priv;
1933         int ret;
1934
1935         id_priv = container_of(id, struct rdma_id_private, id);
1936         if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
1937                 return -EINVAL;
1938
1939         if (!id->qp) {
1940                 id_priv->qp_num = conn_param->qp_num;
1941                 id_priv->qp_type = conn_param->qp_type;
1942                 id_priv->srq = conn_param->srq;
1943         }
1944
1945         switch (rdma_node_get_transport(id->device->node_type)) {
1946         case RDMA_TRANSPORT_IB:
1947                 ret = cma_connect_ib(id_priv, conn_param);
1948                 break;
1949         case RDMA_TRANSPORT_IWARP:
1950                 ret = cma_connect_iw(id_priv, conn_param);
1951                 break;
1952         default:
1953                 ret = -ENOSYS;
1954                 break;
1955         }
1956         if (ret)
1957                 goto err;
1958
1959         return 0;
1960 err:
1961         cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
1962         return ret;
1963 }
1964 EXPORT_SYMBOL(rdma_connect);
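
/*
 * Illustrative sketch (editor's addition): a hypothetical active side
 * connecting from its RDMA_CM_EVENT_ROUTE_RESOLVED handler; only an id
 * in the route-resolved state passes the cma_comp_exch() check above.
 * The values are made up, and with a QP already created on the id,
 * qp_num/qp_type/srq are taken from the id rather than from here.
 */
#if 0
static int example_connect(struct rdma_cm_id *id)
{
        struct rdma_conn_param param = {
                .responder_resources = 1,
                .initiator_depth     = 1,
                .retry_count         = 7,
                .rnr_retry_count     = 7,       /* 7 == retry indefinitely */
        };

        return rdma_connect(id, &param);
}
#endif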
1965
1966 static int cma_accept_ib(struct rdma_id_private *id_priv,
1967                          struct rdma_conn_param *conn_param)
1968 {
1969         struct ib_cm_rep_param rep;
1970         int ret;
1971
1972         ret = cma_modify_qp_rtr(&id_priv->id);
1973         if (ret)
1974                 return ret;
1975
1976         memset(&rep, 0, sizeof rep);
1977         rep.qp_num = id_priv->qp_num;
1978         rep.starting_psn = id_priv->seq_num;
1979         rep.private_data = conn_param->private_data;
1980         rep.private_data_len = conn_param->private_data_len;
1981         rep.responder_resources = conn_param->responder_resources;
1982         rep.initiator_depth = conn_param->initiator_depth;
1983         rep.target_ack_delay = CMA_CM_RESPONSE_TIMEOUT;
1984         rep.failover_accepted = 0;
1985         rep.flow_control = conn_param->flow_control;
1986         rep.rnr_retry_count = conn_param->rnr_retry_count;
1987         rep.srq = id_priv->srq ? 1 : 0;
1988
1989         return ib_send_cm_rep(id_priv->cm_id.ib, &rep);
1990 }
1991
1992 static int cma_accept_iw(struct rdma_id_private *id_priv,
1993                          struct rdma_conn_param *conn_param)
1994 {
1995         struct iw_cm_conn_param iw_param;
1996         int ret;
1997
1998         ret = cma_modify_qp_rtr(&id_priv->id);
1999         if (ret)
2000                 return ret;
2001
2002         iw_param.ord = conn_param->initiator_depth;
2003         iw_param.ird = conn_param->responder_resources;
2004         iw_param.private_data = conn_param->private_data;
2005         iw_param.private_data_len = conn_param->private_data_len;
2006         if (id_priv->id.qp)
2007                 iw_param.qpn = id_priv->qp_num;
2008         else
2009                 iw_param.qpn = conn_param->qp_num;
2010
2011         return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
2012 }
2013
2014 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
2015 {
2016         struct rdma_id_private *id_priv;
2017         int ret;
2018
2019         id_priv = container_of(id, struct rdma_id_private, id);
2020         if (!cma_comp(id_priv, CMA_CONNECT))
2021                 return -EINVAL;
2022
2023         if (!id->qp && conn_param) {
2024                 id_priv->qp_num = conn_param->qp_num;
2025                 id_priv->qp_type = conn_param->qp_type;
2026                 id_priv->srq = conn_param->srq;
2027         }
2028
2029         switch (rdma_node_get_transport(id->device->node_type)) {
2030         case RDMA_TRANSPORT_IB:
2031                 if (conn_param)
2032                         ret = cma_accept_ib(id_priv, conn_param);
2033                 else
2034                         ret = cma_rep_recv(id_priv);
2035                 break;
2036         case RDMA_TRANSPORT_IWARP:
2037                 ret = cma_accept_iw(id_priv, conn_param);
2038                 break;
2039         default:
2040                 ret = -ENOSYS;
2041                 break;
2042         }
2043
2044         if (ret)
2045                 goto reject;
2046
2047         return 0;
2048 reject:
2049         cma_modify_qp_err(id);
2050         rdma_reject(id, NULL, 0);
2051         return ret;
2052 }
2053 EXPORT_SYMBOL(rdma_accept);
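
/*
 * Illustrative sketch (editor's addition): a hypothetical listener
 * accepting from its RDMA_CM_EVENT_CONNECT_REQUEST handler; the child
 * id it receives already satisfies the cma_comp(CMA_CONNECT) check
 * above.  A real ULP would create a PD and QP on the new id first; on
 * failure, rdma_accept() itself rejects the connection.
 */
#if 0
static int example_on_connect_request(struct rdma_cm_id *new_id,
                                      struct rdma_cm_event *event)
{
        struct rdma_conn_param param = {
                .responder_resources = 1,
                .initiator_depth     = 1,
        };

        return rdma_accept(new_id, &param);
}
#endif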
2054
2055 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
2056                 u8 private_data_len)
2057 {
2058         struct rdma_id_private *id_priv;
2059         int ret;
2060
2061         id_priv = container_of(id, struct rdma_id_private, id);
2062         if (!cma_comp(id_priv, CMA_CONNECT))
2063                 return -EINVAL;
2064
2065         switch (rdma_node_get_transport(id->device->node_type)) {
2066         case RDMA_TRANSPORT_IB:
2067                 ret = ib_send_cm_rej(id_priv->cm_id.ib,
2068                                      IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
2069                                      private_data, private_data_len);
2070                 break;
2071         case RDMA_TRANSPORT_IWARP:
2072                 ret = iw_cm_reject(id_priv->cm_id.iw,
2073                                    private_data, private_data_len);
2074                 break;
2075         default:
2076                 ret = -ENOSYS;
2077                 break;
2078         }
2079         return ret;
2080 }
2081 EXPORT_SYMBOL(rdma_reject);
2082
2083 int rdma_disconnect(struct rdma_cm_id *id)
2084 {
2085         struct rdma_id_private *id_priv;
2086         int ret;
2087
2088         id_priv = container_of(id, struct rdma_id_private, id);
2089         if (!cma_comp(id_priv, CMA_CONNECT) &&
2090             !cma_comp(id_priv, CMA_DISCONNECT))
2091                 return -EINVAL;
2092
2093         switch (rdma_node_get_transport(id->device->node_type)) {
2094         case RDMA_TRANSPORT_IB:
2095                 ret = cma_modify_qp_err(id);
2096                 if (ret)
2097                         goto out;
2098                 /* Initiate or respond to a disconnect. */
2099                 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
2100                         ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
2101                 break;
2102         case RDMA_TRANSPORT_IWARP:
2103                 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
2104                 break;
2105         default:
2106                 ret = -EINVAL;
2107                 break;
2108         }
2109 out:
2110         return ret;
2111 }
2112 EXPORT_SYMBOL(rdma_disconnect);
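
/*
 * Illustrative sketch (editor's addition): a hypothetical teardown path.
 * For IB, rdma_disconnect() also moves the QP to the error state and
 * answers an already-received DREQ with a DREP, as shown above.  A ULP
 * must not call rdma_destroy_id() from its event handler; returning
 * non-zero there has the CMA destroy the id instead.
 */
#if 0
static void example_teardown(struct rdma_cm_id *id)
{
        rdma_disconnect(id);
        rdma_destroy_qp(id);    /* if a QP was created on this id */
        rdma_destroy_id(id);
}
#endif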
2113
2114 static void cma_add_one(struct ib_device *device)
2115 {
2116         struct cma_device *cma_dev;
2117         struct rdma_id_private *id_priv;
2118
2119         cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
2120         if (!cma_dev)
2121                 return;
2122
2123         cma_dev->device = device;
2124         cma_dev->node_guid = device->node_guid;
2125
2126         init_completion(&cma_dev->comp);
2127         atomic_set(&cma_dev->refcount, 1);
2128         INIT_LIST_HEAD(&cma_dev->id_list);
2129         ib_set_client_data(device, &cma_client, cma_dev);
2130
2131         mutex_lock(&lock);
2132         list_add_tail(&cma_dev->list, &dev_list);
2133         list_for_each_entry(id_priv, &listen_any_list, list)
2134                 cma_listen_on_dev(id_priv, cma_dev);
2135         mutex_unlock(&lock);
2136 }
2137
2138 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
2139 {
2140         enum cma_state state;
2141
2142         /* Record that we want to remove the device. */
2143         state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
2144         if (state == CMA_DESTROYING)
2145                 return 0;
2146
2147         cma_cancel_operation(id_priv, state);
2148         wait_event(id_priv->wait_remove, !atomic_read(&id_priv->dev_remove));
2149
2150         /* Check for destruction from another callback. */
2151         if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
2152                 return 0;
2153
2154         return cma_notify_user(id_priv, RDMA_CM_EVENT_DEVICE_REMOVAL,
2155                                0, NULL, 0);
2156 }
2157
2158 static void cma_process_remove(struct cma_device *cma_dev)
2159 {
2160         struct rdma_id_private *id_priv;
2161         int ret;
2162
2163         mutex_lock(&lock);
2164         while (!list_empty(&cma_dev->id_list)) {
2165                 id_priv = list_entry(cma_dev->id_list.next,
2166                                      struct rdma_id_private, list);
2167
2168                 if (cma_internal_listen(id_priv)) {
2169                         cma_destroy_listen(id_priv);
2170                         continue;
2171                 }
2172
2173                 list_del_init(&id_priv->list);
2174                 atomic_inc(&id_priv->refcount);
2175                 mutex_unlock(&lock);
2176
2177                 ret = cma_remove_id_dev(id_priv);
2178                 cma_deref_id(id_priv);
2179                 if (ret)
2180                         rdma_destroy_id(&id_priv->id);
2181
2182                 mutex_lock(&lock);
2183         }
2184         mutex_unlock(&lock);
2185
2186         cma_deref_dev(cma_dev);
2187         wait_for_completion(&cma_dev->comp);
2188 }
2189
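/*
 * Illustrative sketch (editor's addition): a hypothetical ULP reaction
 * to the removal event raised above.  Returning non-zero hands the id
 * back to cma_process_remove(), which then calls rdma_destroy_id().
 */
#if 0
static int example_on_device_removal(struct rdma_cm_id *id,
                                     struct rdma_cm_event *event)
{
        if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
                /* Release resources tied to id->device here, then let
                 * the CMA destroy the id on our behalf. */
                return 1;
        }
        return 0;
}
#endif
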
2190 static void cma_remove_one(struct ib_device *device)
2191 {
2192         struct cma_device *cma_dev;
2193
2194         cma_dev = ib_get_client_data(device, &cma_client);
2195         if (!cma_dev)
2196                 return;
2197
2198         mutex_lock(&lock);
2199         list_del(&cma_dev->list);
2200         mutex_unlock(&lock);
2201
2202         cma_process_remove(cma_dev);
2203         kfree(cma_dev);
2204 }
2205
2206 static int cma_init(void)
2207 {
2208         int ret;
2209
2210         cma_wq = create_singlethread_workqueue("rdma_cm_wq");
2211         if (!cma_wq)
2212                 return -ENOMEM;
2213
2214         ib_sa_register_client(&sa_client);
2215         rdma_addr_register_client(&addr_client);
2216
2217         ret = ib_register_client(&cma_client);
2218         if (ret)
2219                 goto err;
2220         return 0;
2221
2222 err:
2223         rdma_addr_unregister_client(&addr_client);
2224         ib_sa_unregister_client(&sa_client);
2225         destroy_workqueue(cma_wq);
2226         return ret;
2227 }
2228
2229 static void cma_cleanup(void)
2230 {
2231         ib_unregister_client(&cma_client);
2232         rdma_addr_unregister_client(&addr_client);
2233         ib_sa_unregister_client(&sa_client);
2234         destroy_workqueue(cma_wq);
2235         idr_destroy(&sdp_ps);
2236         idr_destroy(&tcp_ps);
2237 }
2238
2239 module_init(cma_init);
2240 module_exit(cma_cleanup);