drivers/infiniband/hw/mthca/mthca_provider.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_provider.c 4859 2006-01-09 21:55:10Z roland $
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <linux/mm.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_user.h"
#include "mthca_memfree.h"

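/*
 * Fill in the fields common to all of the SMP queries below: a
 * LID-routed subnet management MAD using the Get method.
 */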
static void init_query_mad(struct ib_smp *mad)
{
        mad->base_version  = 1;
        mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        mad->class_version = 1;
        mad->method        = IB_MGMT_METHOD_GET;
}

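/*
 * Report device attributes.  Most limits come from the values cached
 * in mdev->limits at init time; the vendor/part/hw IDs and the system
 * image GUID are read from the NodeInfo attribute with a MAD_IFC
 * firmware command.
 */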
static int mthca_query_device(struct ib_device *ibdev,
                              struct ib_device_attr *props)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        struct mthca_dev *mdev = to_mdev(ibdev);
        int err = -ENOMEM;
        u8 status;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        props->fw_ver              = mdev->fw_ver;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(mdev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        props->device_cap_flags    = mdev->device_cap_flags;
        props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
        props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);

        props->max_mr_size         = ~0ull;
        props->page_size_cap       = mdev->limits.page_size_cap;
        props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
        props->max_qp_wr           = mdev->limits.max_wqes;
        props->max_sge             = mdev->limits.max_sg;
        props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
        props->max_cqe             = mdev->limits.max_cqes;
        props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
        props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
        props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
        props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
        props->max_srq_wr          = mdev->limits.max_srq_wqes;
        props->max_srq_sge         = mdev->limits.max_srq_sge;
        props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
        props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
                                        IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->max_pkeys           = mdev->limits.pkey_table_len;
        props->max_mcast_grp       = mdev->limits.num_mgms + mdev->limits.num_amgms;
        props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        /*
         * If Sinai memory key optimization is being used, then only
         * the 8-bit key portion will change.  For other HCAs, the
         * unused index bits will also be used for FMR remapping.
         */
        if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                props->max_map_per_fmr = 255;
        else
                props->max_map_per_fmr =
                        (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;

        err = 0;
 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

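/*
 * Report port attributes by querying the PortInfo attribute through
 * the MAD_IFC firmware command and unpacking the response.
 */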
static int mthca_query_port(struct ib_device *ibdev,
                            u8 port, struct ib_port_attr *props)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        props->lid               = be16_to_cpup((__be16 *) (out_mad->data + 16));
        props->lmc               = out_mad->data[34] & 0x7;
        props->sm_lid            = be16_to_cpup((__be16 *) (out_mad->data + 18));
        props->sm_sl             = out_mad->data[36] & 0xf;
        props->state             = out_mad->data[32] & 0xf;
        props->phys_state        = out_mad->data[33] >> 4;
        props->port_cap_flags    = be32_to_cpup((__be32 *) (out_mad->data + 20));
        props->gid_tbl_len       = to_mdev(ibdev)->limits.gid_table_len;
        props->max_msg_sz        = 0x80000000;
        props->pkey_tbl_len      = to_mdev(ibdev)->limits.pkey_table_len;
        props->bad_pkey_cntr     = be16_to_cpup((__be16 *) (out_mad->data + 46));
        props->qkey_viol_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 48));
        props->active_width      = out_mad->data[31] & 0xf;
        props->active_speed      = out_mad->data[35] >> 4;
        props->max_mtu           = out_mad->data[41] & 0xf;
        props->active_mtu        = out_mad->data[36] >> 4;
        props->subnet_timeout    = out_mad->data[51] & 0x1f;
        props->max_vl_num        = out_mad->data[37] >> 4;
        props->init_type_reply   = out_mad->data[41] >> 4;

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

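/*
 * Only changing the node description is supported, serialized by
 * cap_mask_mutex.
 */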
static int mthca_modify_device(struct ib_device *ibdev,
                               int mask,
                               struct ib_device_modify *props)
{
        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
                if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                        return -ERESTARTSYS;
                memcpy(ibdev->node_desc, props->node_desc, 64);
                mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        }

        return 0;
}

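/*
 * Update a port's capability mask with the SET_IB firmware command.
 * The current mask is read back via mthca_query_port() and edited
 * under cap_mask_mutex so that concurrent modifications don't clobber
 * each other.
 */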
static int mthca_modify_port(struct ib_device *ibdev,
                             u8 port, int port_modify_mask,
                             struct ib_port_modify *props)
{
        struct mthca_set_ib_param set_ib;
        struct ib_port_attr attr;
        int err;
        u8 status;

        if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                return -ERESTARTSYS;

        err = mthca_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        set_ib.set_si_guid     = 0;
        set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

        set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

out:
        mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        return err;
}

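/*
 * P_Keys are read through the PKeyTable attribute, which returns 32
 * entries per query; attr_mod selects which block of 32.
 */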
static int mthca_query_pkey(struct ib_device *ibdev,
                            u8 port, u16 index, u16 *pkey)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

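/*
 * A GID is assembled from two queries: PortInfo provides the 64-bit
 * subnet prefix, and GUIDInfo (eight GUIDs per block, selected by
 * attr_mod) provides the low 64 bits.
 */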
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
                           int index, union ib_gid *gid)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(gid->raw, out_mad->data + 8, 8);

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

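/*
 * Set up a userspace context: allocate a UAR for the process and a
 * table for its doorbell pages, and tell userspace the QP table size
 * and UAR context size so that libmthca can mirror the layout.
 */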
static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
                                                struct ib_udata *udata)
{
        struct mthca_alloc_ucontext_resp uresp;
        struct mthca_ucontext           *context;
        int                              err;

        memset(&uresp, 0, sizeof uresp);

        uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
        if (mthca_is_memfree(to_mdev(ibdev)))
                uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
        else
                uresp.uarc_size = 0;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
        if (err) {
                kfree(context);
                return ERR_PTR(err);
        }

        context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
        if (IS_ERR(context->db_tab)) {
                err = PTR_ERR(context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(err);
        }

        if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
                mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(-EFAULT);
        }

        return &context->ibucontext;
}

static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
        mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
                                  to_mucontext(context)->db_tab);
        mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
        kfree(to_mucontext(context));

        return 0;
}

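/*
 * Map the context's UAR page into userspace as uncached I/O memory so
 * that doorbells can be rung without a kernel transition.
 */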
static int mthca_mmap_uar(struct ib_ucontext *context,
                          struct vm_area_struct *vma)
{
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (io_remap_pfn_range(vma, vma->vm_start,
                               to_mucontext(context)->uar.pfn,
                               PAGE_SIZE, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

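/*
 * Allocate a protection domain.  For userspace PDs the PD number is
 * copied back to the caller so it can be used in work requests.
 */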
static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
                                    struct ib_ucontext *context,
                                    struct ib_udata *udata)
{
        struct mthca_pd *pd;
        int err;

        pd = kmalloc(sizeof *pd, GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context) {
                if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
                        mthca_pd_free(to_mdev(ibdev), pd);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
        }

        return &pd->ibpd;
}

static int mthca_dealloc_pd(struct ib_pd *pd)
{
        mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
        kfree(pd);

        return 0;
}

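/*
 * Address handles are allocated with GFP_ATOMIC because create_ah may
 * be called from atomic context.
 */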
static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
                                     struct ib_ah_attr *ah_attr)
{
        int err;
        struct mthca_ah *ah;

        ah = kmalloc(sizeof *ah, GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
        if (err) {
                kfree(ah);
                return ERR_PTR(err);
        }

        return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah)
{
        mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
        kfree(ah);

        return 0;
}

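/*
 * Create an SRQ.  A userspace SRQ supplies its doorbell page in the
 * create command, which must be mapped before the SRQ is allocated;
 * the SRQ number is copied back to userspace on success.
 */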
static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
                                       struct ib_srq_init_attr *init_attr,
                                       struct ib_udata *udata)
{
        struct mthca_create_srq ucmd;
        struct mthca_ucontext *context = NULL;
        struct mthca_srq *srq;
        int err;

        srq = kmalloc(sizeof *srq, GFP_KERNEL);
        if (!srq)
                return ERR_PTR(-ENOMEM);

        if (pd->uobject) {
                context = to_mucontext(pd->uobject->context);

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_free;
                }

                err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                        context->db_tab, ucmd.db_index,
                                        ucmd.db_page);

                if (err)
                        goto err_free;

                srq->mr.ibmr.lkey = ucmd.lkey;
                srq->db_index     = ucmd.db_index;
        }

        err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
                              &init_attr->attr, srq);

        if (err && pd->uobject)
                mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
                                    context->db_tab, ucmd.db_index);

        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
                mthca_free_srq(to_mdev(pd->device), srq);
                err = -EFAULT;
                goto err_free;
        }

        return &srq->ibsrq;

err_free:
        kfree(srq);

        return ERR_PTR(err);
}

static int mthca_destroy_srq(struct ib_srq *srq)
{
        struct mthca_ucontext *context;

        if (srq->uobject) {
                context = to_mucontext(srq->uobject->context);

                mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
                                    context->db_tab, to_msrq(srq)->db_index);
        }

        mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
        kfree(srq);

        return 0;
}

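/*
 * Create a QP.  Userspace QPs need their send and receive doorbell
 * pages mapped before allocation; special QPs (SMI/GSI) are
 * kernel-only and use the larger mthca_sqp structure.  The
 * capabilities actually allocated are returned in init_attr->cap.
 */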
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata)
{
        struct mthca_create_qp ucmd;
        struct mthca_qp *qp;
        int err;

        switch (init_attr->qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        {
                struct mthca_ucontext *context;

                qp = kmalloc(sizeof *qp, GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                if (pd->uobject) {
                        context = to_mucontext(pd->uobject->context);

                        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                                kfree(qp);
                                return ERR_PTR(-EFAULT);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.sq_db_index, ucmd.sq_db_page);
                        if (err) {
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.rq_db_index, ucmd.rq_db_page);
                        if (err) {
                                mthca_unmap_user_db(to_mdev(pd->device),
                                                    &context->uar,
                                                    context->db_tab,
                                                    ucmd.sq_db_index);
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        qp->mr.ibmr.lkey = ucmd.lkey;
                        qp->sq.db_index  = ucmd.sq_db_index;
                        qp->rq.db_index  = ucmd.rq_db_index;
                }

                err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
                                     to_mcq(init_attr->send_cq),
                                     to_mcq(init_attr->recv_cq),
                                     init_attr->qp_type, init_attr->sq_sig_type,
                                     &init_attr->cap, qp);

                if (err && pd->uobject) {
                        context = to_mucontext(pd->uobject->context);

                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.sq_db_index);
                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.rq_db_index);
                }

                qp->ibqp.qp_num = qp->qpn;
                break;
        }
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        {
                /* Don't allow userspace to create special QPs */
                if (pd->uobject)
                        return ERR_PTR(-EINVAL);

                qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

                err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
                                      to_mcq(init_attr->send_cq),
                                      to_mcq(init_attr->recv_cq),
                                      init_attr->sq_sig_type, &init_attr->cap,
                                      qp->ibqp.qp_num, init_attr->port_num,
                                      to_msqp(qp));
                break;
        }
        default:
                /* Don't support raw QPs */
                return ERR_PTR(-ENOSYS);
        }

        if (err) {
                kfree(qp);
                return ERR_PTR(err);
        }

        init_attr->cap.max_send_wr     = qp->sq.max;
        init_attr->cap.max_recv_wr     = qp->rq.max;
        init_attr->cap.max_send_sge    = qp->sq.max_gs;
        init_attr->cap.max_recv_sge    = qp->rq.max_gs;
        init_attr->cap.max_inline_data = qp->max_inline_data;

        return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp)
{
        if (qp->uobject) {
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->sq.db_index);
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->rq.db_index);
        }
        mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
        kfree(qp);
        return 0;
}

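/*
 * Create a CQ.  The requested size is rounded up to a power of two as
 * the hardware requires; a userspace CQ must also have its set-CI and
 * arm doorbell pages mapped.
 */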
static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
                                     int comp_vector,
                                     struct ib_ucontext *context,
                                     struct ib_udata *udata)
{
        struct mthca_create_cq ucmd;
        struct mthca_cq *cq;
        int nent;
        int err;

        if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
                return ERR_PTR(-EINVAL);

        if (context) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                        return ERR_PTR(-EFAULT);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.set_db_index, ucmd.set_db_page);
                if (err)
                        return ERR_PTR(err);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.arm_db_index, ucmd.arm_db_page);
                if (err)
                        goto err_unmap_set;
        }

        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq) {
                err = -ENOMEM;
                goto err_unmap_arm;
        }

        if (context) {
                cq->buf.mr.ibmr.lkey = ucmd.lkey;
                cq->set_ci_db_index  = ucmd.set_db_index;
                cq->arm_db_index     = ucmd.arm_db_index;
        }

        for (nent = 1; nent <= entries; nent <<= 1)
                ; /* nothing */

        err = mthca_init_cq(to_mdev(ibdev), nent,
                            context ? to_mucontext(context) : NULL,
                            context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
                            cq);
        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
                mthca_free_cq(to_mdev(ibdev), cq);
                err = -EFAULT;
                goto err_free;
        }

        cq->resize_buf = NULL;

        return &cq->ibcq;

err_free:
        kfree(cq);

err_unmap_arm:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.arm_db_index);

err_unmap_set:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.set_db_index);

        return ERR_PTR(err);
}

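/*
 * Allocate the staging buffer used while resizing a kernel CQ.
 * cq->lock protects cq->resize_buf, so the structure itself is
 * allocated atomically under the lock and the CQE buffer is filled in
 * outside it, with state advancing to CQ_RESIZE_READY only once the
 * buffer is in place.
 */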
static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
                                  int entries)
{
        int ret;

        spin_lock_irq(&cq->lock);
        if (cq->resize_buf) {
                ret = -EBUSY;
                goto unlock;
        }

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf) {
                ret = -ENOMEM;
                goto unlock;
        }

        cq->resize_buf->state = CQ_RESIZE_ALLOC;

        ret = 0;

unlock:
        spin_unlock_irq(&cq->lock);

        if (ret)
                return ret;

        ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
        if (ret) {
                spin_lock_irq(&cq->lock);
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                spin_unlock_irq(&cq->lock);
                return ret;
        }

        cq->resize_buf->cqe = entries - 1;

        spin_lock_irq(&cq->lock);
        cq->resize_buf->state = CQ_RESIZE_READY;
        spin_unlock_irq(&cq->lock);

        return 0;
}

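/*
 * Resize a CQ using the RESIZE_CQ firmware command.  For a kernel CQ
 * the driver allocates the new buffer and copies over any outstanding
 * CQEs; for a userspace CQ the new buffer's lkey comes from userspace
 * and only the CQE count is updated here.
 */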
static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(ibcq->device);
        struct mthca_cq *cq = to_mcq(ibcq);
        struct mthca_resize_cq ucmd;
        u32 lkey;
        u8 status;
        int ret;

        if (entries < 1 || entries > dev->limits.max_cqes)
                return -EINVAL;

        mutex_lock(&cq->mutex);

        entries = roundup_pow_of_two(entries + 1);
        if (entries == ibcq->cqe + 1) {
                ret = 0;
                goto out;
        }

        if (cq->is_kernel) {
                ret = mthca_alloc_resize_buf(dev, cq, entries);
                if (ret)
                        goto out;
                lkey = cq->resize_buf->buf.mr.ibmr.lkey;
        } else {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        ret = -EFAULT;
                        goto out;
                }
                lkey = ucmd.lkey;
        }

        ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries), &status);
        if (status)
                ret = -EINVAL;

        if (ret) {
                if (cq->resize_buf) {
                        mthca_free_cq_buf(dev, &cq->resize_buf->buf,
                                          cq->resize_buf->cqe);
                        kfree(cq->resize_buf);
                        spin_lock_irq(&cq->lock);
                        cq->resize_buf = NULL;
                        spin_unlock_irq(&cq->lock);
                }
                goto out;
        }

        if (cq->is_kernel) {
                struct mthca_cq_buf tbuf;
                int tcqe;

                spin_lock_irq(&cq->lock);
                if (cq->resize_buf->state == CQ_RESIZE_READY) {
                        mthca_cq_resize_copy_cqes(cq);
                        tbuf         = cq->buf;
                        tcqe         = cq->ibcq.cqe;
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;
                } else {
                        tbuf = cq->resize_buf->buf;
                        tcqe = cq->resize_buf->cqe;
                }

                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                spin_unlock_irq(&cq->lock);

                mthca_free_cq_buf(dev, &tbuf, tcqe);
        } else
                ibcq->cqe = entries - 1;

out:
        mutex_unlock(&cq->mutex);

        return ret;
}

static int mthca_destroy_cq(struct ib_cq *cq)
{
        if (cq->uobject) {
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->arm_db_index);
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->set_ci_db_index);
        }
        mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
        kfree(cq);

        return 0;
}

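/*
 * Translate IB access flags to MPT flags; local read access is always
 * granted.
 */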
static inline u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
               MTHCA_MPT_FLAG_LOCAL_READ;
}

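/*
 * A DMA MR is a region with no address translation, for use by kernel
 * consumers.
 */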
static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mthca_mr *mr;
        int err;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mthca_mr_alloc_notrans(to_mdev(pd->device),
                                     to_mpd(pd)->pd_num,
                                     convert_access(acc), mr);

        if (err) {
                kfree(mr);
                return ERR_PTR(err);
        }

        mr->umem = NULL;

        return &mr->ibmr;
}

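/*
 * Register a list of physical buffers.  The common low-order zero
 * bits of the buffer boundaries and the iova determine the largest
 * usable page shift; the buffers are then broken into pages of that
 * size and handed to the MR allocation path.
 */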
static struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
                                       struct ib_phys_buf *buffer_list,
                                       int                 num_phys_buf,
                                       int                 acc,
                                       u64                *iova_start)
{
        struct mthca_mr *mr;
        u64 *page_list;
        u64 total_size;
        unsigned long mask;
        int shift;
        int npages;
        int err;
        int i, j, n;

        mask = buffer_list[0].addr ^ *iova_start;
        total_size = 0;
        for (i = 0; i < num_phys_buf; ++i) {
                if (i != 0)
                        mask |= buffer_list[i].addr;
                if (i != num_phys_buf - 1)
                        mask |= buffer_list[i].addr + buffer_list[i].size;

                total_size += buffer_list[i].size;
        }

        if (mask & ~PAGE_MASK)
                return ERR_PTR(-EINVAL);

        shift = __ffs(mask | 1 << 31);

        buffer_list[0].size += buffer_list[0].addr & ((1ULL << shift) - 1);
        buffer_list[0].addr &= ~0ull << shift;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        npages = 0;
        for (i = 0; i < num_phys_buf; ++i)
                npages += (buffer_list[i].size + (1ULL << shift) - 1) >> shift;

        if (!npages)
                return &mr->ibmr;

        page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL);
        if (!page_list) {
                kfree(mr);
                return ERR_PTR(-ENOMEM);
        }

        n = 0;
        for (i = 0; i < num_phys_buf; ++i)
                for (j = 0;
                     j < (buffer_list[i].size + (1ULL << shift) - 1) >> shift;
                     ++j)
                        page_list[n++] = buffer_list[i].addr + ((u64) j << shift);

        mthca_dbg(to_mdev(pd->device), "Registering memory at %llx (iova %llx) "
                  "in PD %x; shift %d, npages %d.\n",
                  (unsigned long long) buffer_list[0].addr,
                  (unsigned long long) *iova_start,
                  to_mpd(pd)->pd_num,
                  shift, npages);

        err = mthca_mr_alloc_phys(to_mdev(pd->device),
                                  to_mpd(pd)->pd_num,
                                  page_list, shift, npages,
                                  *iova_start, total_size,
                                  convert_access(acc), mr);

        if (err) {
                kfree(page_list);
                kfree(mr);
                return ERR_PTR(err);
        }

        kfree(page_list);
        mr->umem = NULL;

        return &mr->ibmr;
}

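/*
 * Register a userspace region: pin the pages with ib_umem_get(), walk
 * the resulting chunks writing DMA addresses into the MTT in
 * write_mtt_size batches, then allocate the MR itself.
 */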
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                       u64 virt, int acc, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(pd->device);
        struct ib_umem_chunk *chunk;
        struct mthca_mr *mr;
        u64 *pages;
        int shift, n, len;
        int i, j, k;
        int err = 0;
        int write_mtt_size;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err;
        }

        shift = ffs(mr->umem->page_size) - 1;

        n = 0;
        list_for_each_entry(chunk, &mr->umem->chunk_list, list)
                n += chunk->nents;

        mr->mtt = mthca_alloc_mtt(dev, n);
        if (IS_ERR(mr->mtt)) {
                err = PTR_ERR(mr->mtt);
                goto err_umem;
        }

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto err_mtt;
        }

        i = n = 0;

        write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

        list_for_each_entry(chunk, &mr->umem->chunk_list, list)
                for (j = 0; j < chunk->nmap; ++j) {
                        len = sg_dma_len(&chunk->page_list[j]) >> shift;
                        for (k = 0; k < len; ++k) {
                                pages[i++] = sg_dma_address(&chunk->page_list[j]) +
                                        mr->umem->page_size * k;
                                /*
                                 * Be friendly to write_mtt and pass it chunks
                                 * of appropriate size.
                                 */
                                if (i == write_mtt_size) {
                                        err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
                                        if (err)
                                                goto mtt_done;
                                        n += i;
                                        i = 0;
                                }
                        }
                }

        if (i)
                err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
        free_page((unsigned long) pages);
        if (err)
                goto err_mtt;

        err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
                             convert_access(acc), mr);

        if (err)
                goto err_mtt;

        return &mr->ibmr;

err_mtt:
        mthca_free_mtt(dev, mr->mtt);

err_umem:
        ib_umem_release(mr->umem);

err:
        kfree(mr);
        return ERR_PTR(err);
}

static int mthca_dereg_mr(struct ib_mr *mr)
{
        struct mthca_mr *mmr = to_mmr(mr);

        mthca_free_mr(to_mdev(mr->device), mmr);
        if (mmr->umem)
                ib_umem_release(mmr->umem);
        kfree(mmr);

        return 0;
}

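/*
 * Allocate a fast memory region (FMR).  The map/unmap fast paths are
 * chosen per HCA family (Tavor vs. Arbel) in mthca_register_device().
 */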
static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                                      struct ib_fmr_attr *fmr_attr)
{
        struct mthca_fmr *fmr;
        int err;

        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
        err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
                              convert_access(mr_access_flags), fmr);

        if (err) {
                kfree(fmr);
                return ERR_PTR(err);
        }

        return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
        struct mthca_fmr *mfmr = to_mfmr(fmr);
        int err;

        err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
        if (err)
                return err;

        kfree(mfmr);
        return 0;
}

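/*
 * Unmap a list of FMRs, all of which must belong to the same device,
 * then flush the TPT caches with a single SYNC_TPT command.
 */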
static int mthca_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;
        int err;
        u8 status;
        struct mthca_dev *mdev = NULL;

        list_for_each_entry(fmr, fmr_list, list) {
                if (mdev && to_mdev(fmr->device) != mdev)
                        return -EINVAL;
                mdev = to_mdev(fmr->device);
        }

        if (!mdev)
                return 0;

        if (mthca_is_memfree(mdev)) {
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

                wmb();
        } else
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

        err = mthca_SYNC_TPT(mdev, &status);
        if (err)
                return err;
        if (status)
                return -EINVAL;
        return 0;
}

static ssize_t show_rev(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%x\n", dev->rev_id);
}

static ssize_t show_fw_ver(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%d.%d.%d\n", (int) (dev->fw_ver >> 32),
                       (int) (dev->fw_ver >> 16) & 0xffff,
                       (int) dev->fw_ver & 0xffff);
}

static ssize_t show_hca(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        switch (dev->pdev->device) {
        case PCI_DEVICE_ID_MELLANOX_TAVOR:
                return sprintf(buf, "MT23108\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
                return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL:
                return sprintf(buf, "MT25208\n");
        case PCI_DEVICE_ID_MELLANOX_SINAI:
        case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
                return sprintf(buf, "MT25204\n");
        default:
                return sprintf(buf, "unknown\n");
        }
}

static ssize_t show_board(struct class_device *cdev, char *buf)
{
        struct mthca_dev *dev = container_of(cdev, struct mthca_dev, ib_dev.class_dev);
        return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}

static CLASS_DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static CLASS_DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static CLASS_DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static CLASS_DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);

static struct class_device_attribute *mthca_class_attributes[] = {
        &class_device_attr_hw_rev,
        &class_device_attr_fw_ver,
        &class_device_attr_hca_type,
        &class_device_attr_board_id
};

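/*
 * Fill in the node description, node GUID and (on mem-free HCAs) the
 * hardware revision from the NodeDesc and NodeInfo attributes at
 * initialization time.
 */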
static int mthca_init_node_data(struct mthca_dev *dev)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u8 status;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
                            &status);
        if (err)
                goto out;
        if (status) {
                err = -EINVAL;
                goto out;
        }

        if (mthca_is_memfree(dev))
                dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

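/*
 * Register with the IB core: fill in the verbs entry points (choosing
 * the Tavor or Arbel data-path variants), advertise the supported
 * userspace commands, and create the sysfs attribute files.
 */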
int mthca_register_device(struct mthca_dev *dev)
{
        int ret;
        int i;

        ret = mthca_init_node_data(dev);
        if (ret)
                return ret;

        strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner                = THIS_MODULE;

        dev->ib_dev.uverbs_abi_ver       = MTHCA_UVERBS_ABI_VERSION;
        dev->ib_dev.uverbs_cmd_mask      =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
                (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
        dev->ib_dev.node_type            = RDMA_NODE_IB_CA;
        dev->ib_dev.phys_port_cnt        = dev->limits.num_ports;
        dev->ib_dev.num_comp_vectors     = 1;
        dev->ib_dev.dma_device           = &dev->pdev->dev;
        dev->ib_dev.query_device         = mthca_query_device;
        dev->ib_dev.query_port           = mthca_query_port;
        dev->ib_dev.modify_device        = mthca_modify_device;
        dev->ib_dev.modify_port          = mthca_modify_port;
        dev->ib_dev.query_pkey           = mthca_query_pkey;
        dev->ib_dev.query_gid            = mthca_query_gid;
        dev->ib_dev.alloc_ucontext       = mthca_alloc_ucontext;
        dev->ib_dev.dealloc_ucontext     = mthca_dealloc_ucontext;
        dev->ib_dev.mmap                 = mthca_mmap_uar;
        dev->ib_dev.alloc_pd             = mthca_alloc_pd;
        dev->ib_dev.dealloc_pd           = mthca_dealloc_pd;
        dev->ib_dev.create_ah            = mthca_ah_create;
        dev->ib_dev.query_ah             = mthca_ah_query;
        dev->ib_dev.destroy_ah           = mthca_ah_destroy;

        if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
                dev->ib_dev.create_srq           = mthca_create_srq;
                dev->ib_dev.modify_srq           = mthca_modify_srq;
                dev->ib_dev.query_srq            = mthca_query_srq;
                dev->ib_dev.destroy_srq          = mthca_destroy_srq;
                dev->ib_dev.uverbs_cmd_mask     |=
                        (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
                        (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
                        (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
                        (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

                if (mthca_is_memfree(dev))
                        dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv;
                else
                        dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv;
        }

        dev->ib_dev.create_qp            = mthca_create_qp;
        dev->ib_dev.modify_qp            = mthca_modify_qp;
        dev->ib_dev.query_qp             = mthca_query_qp;
        dev->ib_dev.destroy_qp           = mthca_destroy_qp;
        dev->ib_dev.create_cq            = mthca_create_cq;
        dev->ib_dev.resize_cq            = mthca_resize_cq;
        dev->ib_dev.destroy_cq           = mthca_destroy_cq;
        dev->ib_dev.poll_cq              = mthca_poll_cq;
        dev->ib_dev.get_dma_mr           = mthca_get_dma_mr;
        dev->ib_dev.reg_phys_mr          = mthca_reg_phys_mr;
        dev->ib_dev.reg_user_mr          = mthca_reg_user_mr;
        dev->ib_dev.dereg_mr             = mthca_dereg_mr;

        if (dev->mthca_flags & MTHCA_FLAG_FMR) {
                dev->ib_dev.alloc_fmr            = mthca_alloc_fmr;
                dev->ib_dev.unmap_fmr            = mthca_unmap_fmr;
                dev->ib_dev.dealloc_fmr          = mthca_dealloc_fmr;
                if (mthca_is_memfree(dev))
                        dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
                else
                        dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
        }

        dev->ib_dev.attach_mcast         = mthca_multicast_attach;
        dev->ib_dev.detach_mcast         = mthca_multicast_detach;
        dev->ib_dev.process_mad          = mthca_process_mad;

        if (mthca_is_memfree(dev)) {
                dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
                dev->ib_dev.post_send     = mthca_arbel_post_send;
                dev->ib_dev.post_recv     = mthca_arbel_post_receive;
        } else {
                dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq;
                dev->ib_dev.post_send     = mthca_tavor_post_send;
                dev->ib_dev.post_recv     = mthca_tavor_post_receive;
        }

        mutex_init(&dev->cap_mask_mutex);

        ret = ib_register_device(&dev->ib_dev);
        if (ret)
                return ret;

        for (i = 0; i < ARRAY_SIZE(mthca_class_attributes); ++i) {
                ret = class_device_create_file(&dev->ib_dev.class_dev,
                                               mthca_class_attributes[i]);
                if (ret) {
                        ib_unregister_device(&dev->ib_dev);
                        return ret;
                }
        }

        mthca_start_catas_poll(dev);

        return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
        mthca_stop_catas_poll(dev);
        ib_unregister_device(&dev->ib_dev);
}