/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;
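
/*
 * max_backlog caps the listen backlog a user may request; the sysctl
 * table below exposes it as /proc/sys/net/rdma_ucm/max_backlog.
 */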
static struct ctl_table_header *ucma_ctl_table_hdr;
static ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_path ucma_ctl_path[] = {
	{ .procname = "net" },
	{ .procname = "rdma_ucm" },
	{ }
};

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	int			id;
	struct completion	comp;
	atomic_t		ref;
	int			events_reported;
	int			backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	int			id;
	int			events_reported;

	u64			uid;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
	struct rdma_ucm_event_resp resp;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

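/*
 * Locking: the global "mut" protects both IDRs and the ids they hand out;
 * each ucma_file's "mut" protects that file's ctx_list and event_list.
 * _ucma_find_context() must be called with "mut" held.
 */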
static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = idr_find(&ctx_idr, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	mutex_lock(&mut);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		atomic_inc(&ctx->ref);
	mutex_unlock(&mut);
	return ctx;
}

/* Dropping the last reference completes ctx->comp, unblocking destroy. */
static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (atomic_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	atomic_set(&ctx->ref, 1);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	ctx->file = file;

	do {
		ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	list_add_tail(&ctx->list, &file->ctx_list);
	return ctx;

error:
	kfree(ctx);
	return NULL;
}

static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc;
	int ret;

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc)
		return NULL;

	do {
		ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
		if (!ret)
			goto error;

		mutex_lock(&mut);
		ret = idr_get_new(&multicast_idr, mc, &mc->id);
		mutex_unlock(&mut);
	} while (ret == -EAGAIN);

	if (ret)
		goto error;

	mc->ctx = ctx;
	list_add_tail(&mc->list, &ctx->mc_list);
	return mc;

error:
	kfree(mc);
	return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
				   struct rdma_cm_event *event,
				   struct ucma_event *uevent)
{
	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
}

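/*
 * rdma_cm event callback: package the event as a ucma_event, queue it on
 * the owning file's event_list, and wake any poll()ers.  Returning nonzero
 * for a connect request tells the rdma_cm to destroy the new id.
 */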
static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;
	int ret = 0;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

	uevent->cm_id = cm_id;
	ucma_set_event_context(ctx, event, uevent);
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	mutex_lock(&ctx->file->mut);
	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		if (!ctx->backlog) {
			ret = -ENOMEM;
			kfree(uevent);
			goto out;
		}
		ctx->backlog--;
	} else if (!ctx->uid) {
		/*
		 * We ignore events for new connections until userspace has set
		 * their context.  This can only happen if an error occurs on a
		 * new connection before the user accepts it.  This is okay,
		 * since the accept will just fail later.
		 */
		kfree(uevent);
		goto out;
	}

	list_add_tail(&uevent->list, &ctx->file->event_list);
	wake_up_interruptible(&ctx->file->poll_wait);
out:
	mutex_unlock(&ctx->file->mut);
	return ret;
}

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct ucma_context *ctx;
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;
	int ret = 0;

	if (out_len < sizeof uevent->resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_entry(file->event_list.next, struct ucma_event, list);

	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
		ctx = ucma_alloc_ctx(file);
		if (!ctx) {
			ret = -ENOMEM;
			goto done;
		}
		uevent->ctx->backlog++;
		ctx->cm_id = uevent->cm_id;
		ctx->cm_id->context = ctx;
		uevent->resp.id = ctx->id;
	}

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &uevent->resp, sizeof uevent->resp)) {
		ret = -EFAULT;
		goto done;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	kfree(uevent);
done:
	mutex_unlock(&file->mut);
	return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	mutex_lock(&file->mut);
	ctx = ucma_alloc_ctx(file);
	mutex_unlock(&file->mut);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(ctx->cm_id)) {
		ret = PTR_ERR(ctx->cm_id);
		goto err1;
	}

	resp.id = ctx->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err2;
	}
	return 0;

err2:
	rdma_destroy_id(ctx->cm_id);
err1:
	mutex_lock(&mut);
	idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	mutex_lock(&mut);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		idr_remove(&multicast_idr, mc->id);
		kfree(mc);
	}
	mutex_unlock(&mut);
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx != ctx)
			continue;

		list_del(&uevent->list);

		/* clear incoming connections. */
		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
			rdma_destroy_id(uevent->cm_id);

		kfree(uevent);
	}
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
}

static int ucma_free_ctx(struct ucma_context *ctx)
{
	int events_reported;

	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	ucma_cleanup_multicast(ctx);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	ucma_cleanup_events(ctx);
	list_del(&ctx->list);
	mutex_unlock(&ctx->file->mut);

	events_reported = ctx->events_reported;
	kfree(ctx);
	return events_reported;
}

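/*
 * Tear-down order: remove the id from the IDR so no new lookups succeed,
 * drop our own reference, wait on ctx->comp until all outstanding
 * references are released, then free the context.
 */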
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx))
		idr_remove(&ctx_idr, ctx->id);
	mutex_unlock(&mut);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	resp.events_reported = ucma_free_ctx(ctx);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_bind_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr,
				cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;
	struct net_device *dev;
	u16 vid = 0;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		if (dev) {
			vid = rdma_vlan_dev_vlan_id(dev);
			dev_put(dev);
		}

		iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
				    dev_addr->dst_dev_addr, vid);
		iboe_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		/* fall through */
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query_route cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.port_num = ctx->cm_id->port_num;
	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		switch (rdma_port_get_link_layer(ctx->cm_id->device,
			ctx->cm_id->port_num)) {
		case IB_LINK_LAYER_INFINIBAND:
			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
			break;
		case IB_LINK_LAYER_ETHERNET:
			ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
			break;
		default:
			break;
		}
		break;
	case RDMA_TRANSPORT_IWARP:
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
		break;
	default:
		break;
	}

out:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_connect cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(&conn_param, &cmd.conn_param);
	ret = rdma_connect(ctx->cm_id, &conn_param);
	ucma_put_ctx(ctx);
	return ret;
}

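/* A zero or out-of-range backlog request is clamped to max_backlog. */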
static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
		       cmd.backlog : max_backlog;
	ret = rdma_listen(ctx->cm_id, ctx->backlog);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_accept cmd;
	struct rdma_conn_param conn_param;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.conn_param.valid) {
		ctx->uid = cmd.uid;
		ucma_copy_conn_param(&conn_param, &cmd.conn_param);
		ret = rdma_accept(ctx->cm_id, &conn_param);
	} else
		ret = rdma_accept(ctx->cm_id, NULL);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_disconnect(ctx->cm_id);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof qp_attr);
	qp_attr.qp_state = cmd.qp_state;
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(&resp, &qp_attr);
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct ib_sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	ib_sa_unpack_path(path_data->path_rec, &sa_path);
	ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
	if (ret)
		return ret;

	memset(&event, 0, sizeof event);
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

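/*
 * Option values are variable length, so ucma_set_option() bounces them
 * through a kernel buffer before dispatching on (level, optname).
 */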
static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = kmalloc(cmd.optlen, GFP_KERNEL);
	if (!optval) {
		ret = -ENOMEM;
		goto out1;
	}

	if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
			   cmd.optlen)) {
		ret = -EFAULT;
		goto out2;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
out2:
	kfree(optval);
out1:
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&file->mut);
	mc = ucma_alloc_multicast(ctx);
	if (!mc) {
		ret = -ENOMEM;
		goto err1;
	}

	mc->uid = cmd.uid;
	memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
	if (ret)
		goto err2;

	resp.id = mc->id;
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err3;
	}

	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return 0;

err3:
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	ucma_cleanup_mc_events(mc);
err2:
	mutex_lock(&mut);
	idr_remove(&multicast_idr, mc->id);
	mutex_unlock(&mut);
	list_del(&mc->list);
	kfree(mc);
err1:
	mutex_unlock(&file->mut);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&mut);
	mc = idr_find(&multicast_idr, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (mc->ctx->file != file)
		mc = ERR_PTR(-EINVAL);
	else {
		idr_remove(&multicast_idr, mc->id);
		atomic_inc(&mc->ctx->ref);
	}
	mutex_unlock(&mut);

	if (IS_ERR(mc)) {
		ret = PTR_ERR(mc);
		goto out;
	}

	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_lock(&mc->ctx->file->mut);
	ucma_cleanup_mc_events(mc);
	list_del(&mc->list);
	mutex_unlock(&mc->ctx->file->mut);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	/* Acquire mutex's based on pointer comparison to prevent deadlock. */
	if (file1 < file2) {
		mutex_lock(&file1->mut);
		mutex_lock(&file2->mut);
	} else {
		mutex_lock(&file2->mut);
		mutex_lock(&file1->mut);
	}
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
	if (file1 < file2) {
		mutex_unlock(&file2->mut);
		mutex_unlock(&file1->mut);
	} else {
		mutex_unlock(&file1->mut);
		mutex_unlock(&file2->mut);
	}
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
	struct ucma_event *uevent, *tmp;

	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &file->event_list);
}

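/*
 * Migrate a context, along with any events still queued for it, from the
 * ucma_file it was created on to the ucma_file issuing this command (e.g.
 * when an id is handed to another open instance of the device after fork).
 */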
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_context *ctx;
	struct file *filp;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	filp = fget(cmd.fd);
	if (!filp)
		return -ENOENT;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(filp->private_data, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	cur_file = ctx->file;
	if (cur_file == new_file) {
		resp.events_reported = ctx->events_reported;
		goto response;
	}

	/*
	 * Migrate events between fd's, maintaining order, and avoiding new
	 * events being added before existing events.
	 */
	ucma_lock_files(cur_file, new_file);
	mutex_lock(&mut);

	list_move_tail(&ctx->list, &new_file->ctx_list);
	ucma_move_events(ctx, new_file);
	ctx->file = new_file;
	resp.events_reported = ctx->events_reported;

	mutex_unlock(&mut);
	ucma_unlock_files(cur_file, new_file);

response:
	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
file_put:
	fput(filp);
	return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID]	= ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	= ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_ADDR]	= ucma_bind_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	= ucma_resolve_addr,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	= ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	= ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	= ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	= ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	= ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	= ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	= ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	= ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	= NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	= ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	= ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	= ucma_join_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	= ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	= ucma_migrate_id
};

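/*
 * Each write() carries one command: a struct rdma_ucm_cmd_hdr (cmd, in, out)
 * followed immediately by hdr.in bytes of command payload.  An illustrative
 * (hypothetical) userspace call for RDMA_USER_CM_CMD_NOTIFY might look like:
 *
 *	struct { struct rdma_ucm_cmd_hdr hdr; struct rdma_ucm_notify cmd; } msg;
 *	msg.hdr.cmd = RDMA_USER_CM_CMD_NOTIFY;
 *	msg.hdr.in  = sizeof(msg.cmd);
 *	msg.hdr.out = 0;
 *	write(fd, &msg, sizeof(msg));
 */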
static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
		return -EACCES;

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	if (!ret)
		ret = len;

	return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = POLLIN | POLLRDNORM;

	return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof *file, GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;
	return nonseekable_open(inode, filp);
}

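/*
 * On release, destroy every context still owned by this file.  file->mut
 * is dropped around ucma_free_ctx() since freeing a context retakes it.
 */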
static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;
	struct ucma_context *ctx, *tmp;

	mutex_lock(&file->mut);
	list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
		mutex_unlock(&file->mut);

		mutex_lock(&mut);
		idr_remove(&ctx_idr, ctx->id);
		mutex_unlock(&mut);

		ucma_free_ctx(ctx);
		mutex_lock(&file->mut);
	}
	mutex_unlock(&file->mut);
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner 	 = THIS_MODULE,
	.open 	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll    = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
};

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}
	return 0;
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	unregister_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
	idr_destroy(&ctx_idr);
	idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);