/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/string.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
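
/*
 * Example (illustrative only): IB static rates are multiples of the
 * 2.5 Gb/s base rate, so the two helpers above are inverses of each
 * other for the rates they know about, e.g.:
 *
 *	ib_rate_to_mult(IB_RATE_20_GBPS) == 8	(8 * 2.5 Gb/s = 20 Gb/s)
 *	mult_to_ib_rate(8) == IB_RATE_20_GBPS
 *
 * Unknown rates map to -1, and unknown multipliers fall back to
 * IB_RATE_PORT_CURRENT.
 */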

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);
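
/*
 * Usage sketch (consumer-side, not part of this file): a PD must
 * outlive everything allocated from it.  usecnt counts those objects,
 * so ib_dealloc_pd() above returns -EBUSY until all of them are gone:
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...create QPs/AHs/MRs against pd, use them, destroy them...
 *	ret = ib_dealloc_pd(pd);	(0 once usecnt has dropped to 0)
 */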

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
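
/*
 * Usage sketch (hypothetical consumer code): a UD service that wants
 * to reply to a received datagram can build an address handle straight
 * from the completion, then destroy it when done:
 *
 *	ah = ib_create_ah_from_wc(pd, &wc, grh, port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	...post a send on a UD QP with wr.ud.ah = ah...
 *	ib_destroy_ah(ah);
 *
 * "grh" may only be dereferenced when wc.wc_flags has IB_WC_GRH set,
 * which ib_init_ah_from_wc() checks before touching it.
 */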

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;

	ret = srq->device->destroy_srq(srq);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp;

	qp = pd->device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device	  = pd->device;
		qp->pd		  = pd;
		qp->send_cq	  = qp_init_attr->send_cq;
		qp->recv_cq	  = qp_init_attr->recv_cq;
		qp->srq		  = qp_init_attr->srq;
		qp->uobject	  = NULL;
		qp->event_handler = qp_init_attr->event_handler;
		qp->qp_context	  = qp_init_attr->qp_context;
		qp->qp_type	  = qp_init_attr->qp_type;
		atomic_inc(&pd->usecnt);
		atomic_inc(&qp_init_attr->send_cq->usecnt);
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
		if (qp_init_attr->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
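
/*
 * Usage sketch (values are hypothetical): callers describe the QP in
 * struct ib_qp_init_attr; everything referenced here (CQs, SRQ) has
 * its usecnt bumped so it cannot be destroyed out from under the QP:
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq     = send_cq,
 *		.recv_cq     = recv_cq,
 *		.cap	     = { .max_send_wr  = 64, .max_recv_wr  = 64,
 *			         .max_send_sge = 1,  .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_ALL_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 */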

static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETHERTYPE + 1];
	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETHERTYPE + 1];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV | IB_QP_PATH_MTU |
						IB_QP_DEST_QPN | IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV | IB_QP_PATH_MTU |
						IB_QP_DEST_QPN | IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV | IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT | IB_QP_AV |
						IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
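
/*
 * Example transition (values are illustrative): taking a freshly
 * created UD QP from RESET to INIT needs exactly the mask listed in
 * qp_state_table[IB_QPS_RESET][IB_QPS_INIT].req_param[IB_QPT_UD]:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state   = IB_QPS_INIT,
 *		.pkey_index = 0,
 *		.port_num   = 1,
 *		.qkey	    = 0x11111111,
 *	};
 *	ret = ib_modify_qp(qp, &attr,
 *			   IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			   IB_QP_PORT | IB_QP_QKEY);
 *
 * Dropping any of the required bits makes ib_modify_qp_is_ok() return
 * 0, and low-level drivers typically reject the modify with -EINVAL.
 */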

int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	int ret;

	pd  = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&scq->usecnt);
		atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device	  = device;
		cq->uobject	  = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context	  = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);
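
/*
 * Usage sketch (hypothetical handler): comp_handler is invoked, often
 * from interrupt context, once completion notification has been
 * requested; a typical handler polls and then rearms the CQ:
 *
 *	static void my_comp_handler(struct ib_cq *cq, void *ctx)
 *	{
 *		...drain completions with ib_poll_cq()...
 *		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *	}
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_ctx, 256, 0);
 */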

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd	    = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);
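
/*
 * Usage sketch: kernel consumers commonly take one DMA MR per PD
 * covering all of physical memory (flags are illustrative):
 *
 *	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE |
 *			       IB_ACCESS_REMOTE_READ |
 *			       IB_ACCESS_REMOTE_WRITE);
 *
 * The resulting mr->lkey/mr->rkey are then used in work requests;
 * ib_dereg_mr() releases the region (and the PD reference taken here).
 */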

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;

	if (!pd->device->reg_phys_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd	    = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_fast_reg_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd	    = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_mr);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		page_list->device	     = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);
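
/*
 * Usage sketch (abridged, buffer and sizes hypothetical): fast
 * registration pairs an MR from ib_alloc_fast_reg_mr() with a page
 * list and is driven by posting an IB_WR_FAST_REG_MR work request:
 *
 *	mr = ib_alloc_fast_reg_mr(pd, 16);
 *	pl = ib_alloc_fast_reg_page_list(device, 16);
 *	...fill pl->page_list[] with page-aligned DMA addresses...
 *
 *	struct ib_send_wr wr = {
 *		.opcode = IB_WR_FAST_REG_MR,
 *		.wr.fast_reg = {
 *			.iova_start	= iova,
 *			.page_list	= pl,
 *			.page_list_len	= npages,
 *			.page_shift	= PAGE_SHIFT,
 *			.length		= len,
 *			.access_flags	= IB_ACCESS_LOCAL_WRITE |
 *					  IB_ACCESS_REMOTE_WRITE,
 *			.rkey		= mr->rkey,
 *		},
 *	};
 *	ret = ib_post_send(qp, &wr, &bad_wr);
 */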

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd	    = pd;
		mw->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd	    = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);
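
/*
 * Usage sketch (names illustrative): FMRs are mapped with
 * ib_map_phys_fmr() (an inline in <rdma/ib_verbs.h>) and unmapped in
 * batches, which is what makes them "fast" -- one unmap call flushes
 * a whole list of regions:
 *
 *	ret = ib_map_phys_fmr(fmr, dma_pages, npages, iova);
 *	...use fmr->lkey / fmr->rkey in work requests...
 *	list_add_tail(&fmr->list, &to_unmap);
 *	ret = ib_unmap_fmr(&to_unmap);
 */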

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->attach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->attach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->detach_mcast)
		return -ENOSYS;
	if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
		return -EINVAL;

	return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);
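
/*
 * Example: IB multicast GIDs always begin with 0xff (hence the raw[0]
 * check above), and only UD QPs may join a group.  A consumer that has
 * resolved a group via the SA (mgid/mlid below are illustrative) would
 * do:
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	...receive datagrams sent to the group...
 *	ret = ib_detach_mcast(qp, &mgid, mlid);
 */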