drivers/infiniband/core/verbs.c
/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

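/*
 * ib_rate_to_mult() maps an IB static rate enum value to the
 * corresponding multiple of the 2.5 Gb/sec base signalling rate,
 * or -1 if the rate is not recognized.
 */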
int ib_rate_to_mult(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return  1;
        case IB_RATE_5_GBPS:   return  2;
        case IB_RATE_10_GBPS:  return  4;
        case IB_RATE_20_GBPS:  return  8;
        case IB_RATE_30_GBPS:  return 12;
        case IB_RATE_40_GBPS:  return 16;
        case IB_RATE_60_GBPS:  return 24;
        case IB_RATE_80_GBPS:  return 32;
        case IB_RATE_120_GBPS: return 48;
        default:               return -1;
        }
}
EXPORT_SYMBOL(ib_rate_to_mult);

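/*
 * mult_to_ib_rate() is the inverse mapping: it converts a multiple of
 * the 2.5 Gb/sec base rate back to the IB rate enum, falling back to
 * IB_RATE_PORT_CURRENT for multiples it does not recognize.
 */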
enum ib_rate mult_to_ib_rate(int mult)
{
        switch (mult) {
        case 1:  return IB_RATE_2_5_GBPS;
        case 2:  return IB_RATE_5_GBPS;
        case 4:  return IB_RATE_10_GBPS;
        case 8:  return IB_RATE_20_GBPS;
        case 12: return IB_RATE_30_GBPS;
        case 16: return IB_RATE_40_GBPS;
        case 24: return IB_RATE_60_GBPS;
        case 32: return IB_RATE_80_GBPS;
        case 48: return IB_RATE_120_GBPS;
        default: return IB_RATE_PORT_CURRENT;
        }
}
EXPORT_SYMBOL(mult_to_ib_rate);

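/*
 * rdma_node_get_transport() derives the transport type from the node
 * type: IB CAs, switches and routers use the IB transport, while RNICs
 * use iWARP.  Any other node type is a driver bug, hence the BUG().
 */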
enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
        switch (node_type) {
        case RDMA_NODE_IB_CA:
        case RDMA_NODE_IB_SWITCH:
        case RDMA_NODE_IB_ROUTER:
                return RDMA_TRANSPORT_IB;
        case RDMA_NODE_RNIC:
                return RDMA_TRANSPORT_IWARP;
        default:
                BUG();
                return 0;
        }
}
EXPORT_SYMBOL(rdma_node_get_transport);

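/*
 * rdma_port_get_link_layer() asks the low-level driver for the link
 * layer of a port if it provides a get_link_layer method; otherwise it
 * infers InfiniBand for IB transports and Ethernet for iWARP.
 */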
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
        if (device->get_link_layer)
                return device->get_link_layer(device, port_num);

        switch (rdma_node_get_transport(device->node_type)) {
        case RDMA_TRANSPORT_IB:
                return IB_LINK_LAYER_INFINIBAND;
        case RDMA_TRANSPORT_IWARP:
                return IB_LINK_LAYER_ETHERNET;
        default:
                return IB_LINK_LAYER_UNSPECIFIED;
        }
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

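/*
 * ib_alloc_pd() allocates a protection domain for in-kernel use (no
 * uobject, no udata) and initializes its reference count; the count is
 * bumped by every AH, SRQ, QP, MR, MW and FMR created on the PD below,
 * and ib_dealloc_pd() refuses to free a PD that is still referenced.
 */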
struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
        struct ib_pd *pd;

        pd = device->alloc_pd(device, NULL, NULL);

        if (!IS_ERR(pd)) {
                pd->device  = device;
                pd->uobject = NULL;
                atomic_set(&pd->usecnt, 0);
        }

        return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
        if (atomic_read(&pd->usecnt))
                return -EBUSY;

        return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

/* Address handles */

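/*
 * ib_create_ah() creates an address handle on a PD for in-kernel use
 * and takes a reference on the PD, dropped again in ib_destroy_ah().
 */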
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
        struct ib_ah *ah;

        ah = pd->device->create_ah(pd, ah_attr);

        if (!IS_ERR(ah)) {
                ah->device  = pd->device;
                ah->pd      = pd;
                ah->uobject = NULL;
                atomic_inc(&pd->usecnt);
        }

        return ah;
}
EXPORT_SYMBOL(ib_create_ah);

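/*
 * ib_init_ah_from_wc() fills in an ib_ah_attr that can be used to
 * reply to the sender of a received work completion: the remote LID,
 * SL and path bits come from the WC, and if a GRH was present the
 * GIDs are swapped and the flow label and traffic class are recovered
 * from the GRH's version_tclass_flow word.  ib_create_ah_from_wc()
 * below wraps this and ib_create_ah() into a single call.
 */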
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
                       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
        u32 flow_class;
        u16 gid_index;
        int ret;

        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->dlid = wc->slid;
        ah_attr->sl = wc->sl;
        ah_attr->src_path_bits = wc->dlid_path_bits;
        ah_attr->port_num = port_num;

        if (wc->wc_flags & IB_WC_GRH) {
                ah_attr->ah_flags = IB_AH_GRH;
                ah_attr->grh.dgid = grh->sgid;

                ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
                                         &gid_index);
                if (ret)
                        return ret;

                ah_attr->grh.sgid_index = (u8) gid_index;
                flow_class = be32_to_cpu(grh->version_tclass_flow);
                ah_attr->grh.flow_label = flow_class & 0xFFFFF;
                ah_attr->grh.hop_limit = 0xFF;
                ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
        }
        return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num)
{
        struct ib_ah_attr ah_attr;
        int ret;

        ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
        if (ret)
                return ERR_PTR(ret);

        return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->modify_ah ?
                ah->device->modify_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->query_ah ?
                ah->device->query_ah(ah, ah_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
        struct ib_pd *pd;
        int ret;

        pd = ah->pd;
        ret = ah->device->destroy_ah(ah);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

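/*
 * ib_create_srq() creates a shared receive queue on a PD, copying the
 * event handler and context from the init attributes and taking a
 * reference on the PD.  Devices without SRQ support are detected by
 * the missing create_srq method and -ENOSYS is returned.
 */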
struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr)
{
        struct ib_srq *srq;

        if (!pd->device->create_srq)
                return ERR_PTR(-ENOSYS);

        srq = pd->device->create_srq(pd, srq_init_attr, NULL);

        if (!IS_ERR(srq)) {
                srq->device        = pd->device;
                srq->pd            = pd;
                srq->uobject       = NULL;
                srq->event_handler = srq_init_attr->event_handler;
                srq->srq_context   = srq_init_attr->srq_context;
                atomic_inc(&pd->usecnt);
                atomic_set(&srq->usecnt, 0);
        }

        return srq;
}
EXPORT_SYMBOL(ib_create_srq);

int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask)
{
        return srq->device->modify_srq ?
                srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr)
{
        return srq->device->query_srq ?
                srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_read(&srq->usecnt))
                return -EBUSY;

        pd = srq->pd;

        ret = srq->device->destroy_srq(srq);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

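/*
 * ib_create_qp() creates a queue pair for in-kernel use and takes
 * references on the PD, the send and receive CQs and, if one is used,
 * the SRQ; ib_destroy_qp() drops the same references.
 */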
struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_qp *qp;

        qp = pd->device->create_qp(pd, qp_init_attr, NULL);

        if (!IS_ERR(qp)) {
                qp->device        = pd->device;
                qp->pd            = pd;
                qp->send_cq       = qp_init_attr->send_cq;
                qp->recv_cq       = qp_init_attr->recv_cq;
                qp->srq           = qp_init_attr->srq;
                qp->uobject       = NULL;
                qp->event_handler = qp_init_attr->event_handler;
                qp->qp_context    = qp_init_attr->qp_context;
                qp->qp_type       = qp_init_attr->qp_type;
                atomic_inc(&pd->usecnt);
                atomic_inc(&qp_init_attr->send_cq->usecnt);
                atomic_inc(&qp_init_attr->recv_cq->usecnt);
                if (qp_init_attr->srq)
                        atomic_inc(&qp_init_attr->srq->usecnt);
        }

        return qp;
}
EXPORT_SYMBOL(ib_create_qp);

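/*
 * qp_state_table describes the legal QP state transitions: for every
 * (current state, next state) pair it records whether the transition
 * is valid and, per QP type, which attribute mask bits are required
 * and which are optional.  ib_modify_qp_is_ok() below checks a
 * requested transition against this table.
 */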
static const struct {
        int                     valid;
        enum ib_qp_attr_mask    req_param[IB_QPT_RAW_ETHERTYPE + 1];
        enum ib_qp_attr_mask    opt_param[IB_QPT_RAW_ETHERTYPE + 1];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                        }
                },
        },
        [IB_QPS_INIT]  = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_RTR]   = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UC]  = (IB_QP_AV                        |
                                                IB_QP_PATH_MTU                  |
                                                IB_QP_DEST_QPN                  |
                                                IB_QP_RQ_PSN),
                                [IB_QPT_RC]  = (IB_QP_AV                        |
                                                IB_QP_PATH_MTU                  |
                                                IB_QP_DEST_QPN                  |
                                                IB_QP_RQ_PSN                    |
                                                IB_QP_MAX_DEST_RD_ATOMIC        |
                                                IB_QP_MIN_RNR_TIMER),
                        },
                        .opt_param = {
                                 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX               |
                                                 IB_QP_QKEY),
                                 [IB_QPT_UC]  = (IB_QP_ALT_PATH                 |
                                                 IB_QP_ACCESS_FLAGS             |
                                                 IB_QP_PKEY_INDEX),
                                 [IB_QPT_RC]  = (IB_QP_ALT_PATH                 |
                                                 IB_QP_ACCESS_FLAGS             |
                                                 IB_QP_PKEY_INDEX),
                                 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX               |
                                                 IB_QP_QKEY),
                                 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX               |
                                                 IB_QP_QKEY),
                         }
                }
        },
        [IB_QPS_RTR]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UD]  = IB_QP_SQ_PSN,
                                [IB_QPT_UC]  = IB_QP_SQ_PSN,
                                [IB_QPT_RC]  = (IB_QP_TIMEOUT                   |
                                                IB_QP_RETRY_CNT                 |
                                                IB_QP_RNR_RETRY                 |
                                                IB_QP_SQ_PSN                    |
                                                IB_QP_MAX_QP_RD_ATOMIC),
                                [IB_QPT_SMI] = IB_QP_SQ_PSN,
                                [IB_QPT_GSI] = IB_QP_SQ_PSN,
                        },
                        .opt_param = {
                                 [IB_QPT_UD]  = (IB_QP_CUR_STATE                |
                                                 IB_QP_QKEY),
                                 [IB_QPT_UC]  = (IB_QP_CUR_STATE                |
                                                 IB_QP_ALT_PATH                 |
                                                 IB_QP_ACCESS_FLAGS             |
                                                 IB_QP_PATH_MIG_STATE),
                                 [IB_QPT_RC]  = (IB_QP_CUR_STATE                |
                                                 IB_QP_ALT_PATH                 |
                                                 IB_QP_ACCESS_FLAGS             |
                                                 IB_QP_MIN_RNR_TIMER            |
                                                 IB_QP_PATH_MIG_STATE),
                                 [IB_QPT_SMI] = (IB_QP_CUR_STATE                |
                                                 IB_QP_QKEY),
                                 [IB_QPT_GSI] = (IB_QP_CUR_STATE                |
                                                 IB_QP_QKEY),
                         }
                }
        },
        [IB_QPS_RTS]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_ACCESS_FLAGS              |
                                                IB_QP_ALT_PATH                  |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_ACCESS_FLAGS              |
                                                IB_QP_ALT_PATH                  |
                                                IB_QP_PATH_MIG_STATE            |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_SQD]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
                        }
                },
        },
        [IB_QPS_SQD]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_ALT_PATH                  |
                                                IB_QP_ACCESS_FLAGS              |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_ALT_PATH                  |
                                                IB_QP_ACCESS_FLAGS              |
                                                IB_QP_MIN_RNR_TIMER             |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_SQD]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_AV                        |
                                                IB_QP_ALT_PATH                  |
                                                IB_QP_ACCESS_FLAGS              |
                                                IB_QP_PKEY_INDEX                |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_PORT                      |
                                                IB_QP_AV                        |
                                                IB_QP_TIMEOUT                   |
                                                IB_QP_RETRY_CNT                 |
                                                IB_QP_RNR_RETRY                 |
                                                IB_QP_MAX_QP_RD_ATOMIC          |
                                                IB_QP_MAX_DEST_RD_ATOMIC        |
                                                IB_QP_ALT_PATH                  |
                                                IB_QP_ACCESS_FLAGS              |
                                                IB_QP_PKEY_INDEX                |
                                                IB_QP_MIN_RNR_TIMER             |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_SQE]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_ERR] = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 }
        }
};

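/*
 * ib_modify_qp_is_ok() returns 1 if the requested transition from
 * cur_state to next_state is allowed for the given QP type and the
 * attribute mask contains all required bits and no bits outside the
 * required, optional and IB_QP_STATE set; otherwise it returns 0.
 * IB_QP_CUR_STATE is only accepted from the RTR, RTS, SQD and SQE
 * states.
 */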
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
                       enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
        enum ib_qp_attr_mask req_param, opt_param;

        if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
            next_state < 0 || next_state > IB_QPS_ERR)
                return 0;

        if (mask & IB_QP_CUR_STATE  &&
            cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
            cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
                return 0;

        if (!qp_state_table[cur_state][next_state].valid)
                return 0;

        req_param = qp_state_table[cur_state][next_state].req_param[type];
        opt_param = qp_state_table[cur_state][next_state].opt_param[type];

        if ((mask & req_param) != req_param)
                return 0;

        if (mask & ~(req_param | opt_param | IB_QP_STATE))
                return 0;

        return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask)
{
        return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr)
{
        return qp->device->query_qp ?
                qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
                -ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
        int ret;

        pd  = qp->pd;
        scq = qp->send_cq;
        rcq = qp->recv_cq;
        srq = qp->srq;

        ret = qp->device->destroy_qp(qp);
        if (!ret) {
                atomic_dec(&pd->usecnt);
                atomic_dec(&scq->usecnt);
                atomic_dec(&rcq->usecnt);
                if (srq)
                        atomic_dec(&srq->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

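/*
 * ib_create_cq() creates a completion queue for in-kernel use, passing
 * cqe and comp_vector through to the driver and recording the
 * completion and event handlers and the caller's context for later
 * dispatch.
 */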
struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe, int comp_vector)
{
        struct ib_cq *cq;

        cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);

        if (!IS_ERR(cq)) {
                cq->device        = device;
                cq->uobject       = NULL;
                cq->comp_handler  = comp_handler;
                cq->event_handler = event_handler;
                cq->cq_context    = cq_context;
                atomic_set(&cq->usecnt, 0);
        }

        return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        return cq->device->modify_cq ?
                cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
        if (atomic_read(&cq->usecnt))
                return -EBUSY;

        return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
        return cq->device->resize_cq ?
                cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

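/*
 * ib_get_dma_mr() asks the driver for a memory region covering system
 * memory usable for DMA with the given access flags, taking a
 * reference on the PD and initializing the MR's use count.
 */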
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
        struct ib_mr *mr;

        mr = pd->device->get_dma_mr(pd, mr_access_flags);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start)
{
        struct ib_mr *mr;

        if (!pd->device->reg_phys_mr)
                return ERR_PTR(-ENOSYS);

        mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
                                     mr_access_flags, iova_start);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start)
{
        struct ib_pd *old_pd;
        int ret;

        if (!mr->device->rereg_phys_mr)
                return -ENOSYS;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        old_pd = mr->pd;

        ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
                                        phys_buf_array, num_phys_buf,
                                        mr_access_flags, iova_start);

        if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
                atomic_dec(&old_pd->usecnt);
                atomic_inc(&pd->usecnt);
        }

        return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
        return mr->device->query_mr ?
                mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_read(&mr->usecnt))
                return -EBUSY;

        pd = mr->pd;
        ret = mr->device->dereg_mr(mr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
        struct ib_mr *mr;

        if (!pd->device->alloc_fast_reg_mr)
                return ERR_PTR(-ENOSYS);

        mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_inc(&pd->usecnt);
                atomic_set(&mr->usecnt, 0);
        }

        return mr;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_mr);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
                                                          int max_page_list_len)
{
        struct ib_fast_reg_page_list *page_list;

        if (!device->alloc_fast_reg_page_list)
                return ERR_PTR(-ENOSYS);

        page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

        if (!IS_ERR(page_list)) {
                page_list->device = device;
                page_list->max_page_list_len = max_page_list_len;
        }

        return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
        page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);

/* Memory windows */

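/*
 * ib_alloc_mw() allocates a memory window on a PD, returning -ENOSYS
 * for devices that do not implement memory windows, and takes a
 * reference on the PD that ib_dealloc_mw() releases.
 */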
struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
        struct ib_mw *mw;

        if (!pd->device->alloc_mw)
                return ERR_PTR(-ENOSYS);

        mw = pd->device->alloc_mw(pd);
        if (!IS_ERR(mw)) {
                mw->device  = pd->device;
                mw->pd      = pd;
                mw->uobject = NULL;
                atomic_inc(&pd->usecnt);
        }

        return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd;
        int ret;

        pd = mw->pd;
        ret = mw->device->dealloc_mw(mw);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

/* "Fast" memory regions */

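/*
 * ib_alloc_fmr() allocates a fast memory region described by fmr_attr
 * on a PD, returning -ENOSYS when the device has no FMR support and
 * pinning the PD for the lifetime of the FMR.  ib_unmap_fmr() unmaps
 * a whole list of FMRs through the device of the first entry.
 */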
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr)
{
        struct ib_fmr *fmr;

        if (!pd->device->alloc_fmr)
                return ERR_PTR(-ENOSYS);

        fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
        if (!IS_ERR(fmr)) {
                fmr->device = pd->device;
                fmr->pd     = pd;
                atomic_inc(&pd->usecnt);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;

        if (list_empty(fmr_list))
                return 0;

        fmr = list_entry(fmr_list->next, struct ib_fmr, list);
        return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
        struct ib_pd *pd;
        int ret;

        pd = fmr->pd;
        ret = fmr->device->dealloc_fmr(fmr);
        if (!ret)
                atomic_dec(&pd->usecnt);

        return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

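/*
 * ib_attach_mcast() and ib_detach_mcast() attach a UD QP to and detach
 * it from a multicast group.  Both reject GIDs whose first byte is not
 * 0xff (i.e. not a multicast GID) and QPs that are not of type UD.
 */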
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        if (!qp->device->attach_mcast)
                return -ENOSYS;
        if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
                return -EINVAL;

        return qp->device->attach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        if (!qp->device->detach_mcast)
                return -ENOSYS;
        if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
                return -EINVAL;

        return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);