/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_main.c 1396 2004-12-28 04:10:27Z roland $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include "mthca_dev.h"
#include "mthca_config_reg.h"
#include "mthca_cmd.h"
#include "mthca_profile.h"
#include "mthca_memfree.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox InfiniBand HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_INFINIBAND_MTHCA_DEBUG

int mthca_debug_level = 0;
module_param_named(debug_level, mthca_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_INFINIBAND_MTHCA_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 0;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

static int msi = 0;
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "attempt to use MSI if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)
#define msi   (0)

#endif /* CONFIG_PCI_MSI */

static int tune_pci = 0;
module_param(tune_pci, int, 0444);
MODULE_PARM_DESC(tune_pci, "increase PCI burst from the default set by BIOS if nonzero");

struct mutex mthca_device_mutex;

static const char mthca_version[] __devinitdata =
        DRV_NAME ": Mellanox InfiniBand HCA driver v"
        DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mthca_profile default_profile = {
        .num_qp            = 1 << 16,
        .rdb_per_qp        = 4,
        .num_cq            = 1 << 16,
        .num_mcg           = 1 << 13,
        .num_mpt           = 1 << 17,
        .num_mtt           = 1 << 20,
        .num_udav          = 1 << 15,   /* Tavor only */
        .fmr_reserved_mtts = 1 << 18,   /* Tavor only */
        .uarc_size         = 1 << 18,   /* Arbel only */
};

static int mthca_tune_pci(struct mthca_dev *mdev)
{
        int cap;
        u16 val;

        if (!tune_pci)
                return 0;

        /* First try to max out Read Byte Count */
        cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
        if (cap) {
                if (pci_read_config_word(mdev->pdev, cap + PCI_X_CMD, &val)) {
                        mthca_err(mdev, "Couldn't read PCI-X command register, "
                                  "aborting.\n");
                        return -ENODEV;
                }
                val = (val & ~PCI_X_CMD_MAX_READ) | (3 << 2);
                if (pci_write_config_word(mdev->pdev, cap + PCI_X_CMD, val)) {
                        mthca_err(mdev, "Couldn't write PCI-X command register, "
                                  "aborting.\n");
                        return -ENODEV;
                }
        } else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
                mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");

        cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP);
        if (cap) {
                if (pci_read_config_word(mdev->pdev, cap + PCI_EXP_DEVCTL, &val)) {
                        mthca_err(mdev, "Couldn't read PCI Express device control "
                                  "register, aborting.\n");
                        return -ENODEV;
                }
                val = (val & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12);
                if (pci_write_config_word(mdev->pdev, cap + PCI_EXP_DEVCTL, val)) {
                        mthca_err(mdev, "Couldn't write PCI Express device control "
                                  "register, aborting.\n");
                        return -ENODEV;
                }
        } else if (mdev->mthca_flags & MTHCA_FLAG_PCIE)
                mthca_info(mdev, "No PCI Express capability, "
                           "not setting Max Read Request Size.\n");

        return 0;
}

static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
{
        int err;
        u8 status;

        err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
        if (err) {
                mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
                return err;
        }
        if (status) {
                mthca_err(mdev, "QUERY_DEV_LIM returned status 0x%02x, "
                          "aborting.\n", status);
                return -EINVAL;
        }
        if (dev_lim->min_page_sz > PAGE_SIZE) {
                mthca_err(mdev, "HCA minimum page size of %d bigger than "
                          "kernel PAGE_SIZE of %ld, aborting.\n",
                          dev_lim->min_page_sz, PAGE_SIZE);
                return -ENODEV;
        }
        if (dev_lim->num_ports > MTHCA_MAX_PORTS) {
                mthca_err(mdev, "HCA has %d ports, but we only support %d, "
                          "aborting.\n",
                          dev_lim->num_ports, MTHCA_MAX_PORTS);
                return -ENODEV;
        }

        if (dev_lim->uar_size > pci_resource_len(mdev->pdev, 2)) {
                mthca_err(mdev, "HCA reported UAR size of 0x%x bigger than "
                          "PCI resource 2 size of 0x%llx, aborting.\n",
                          dev_lim->uar_size,
                          (unsigned long long)pci_resource_len(mdev->pdev, 2));
                return -ENODEV;
        }

        mdev->limits.num_ports          = dev_lim->num_ports;
        mdev->limits.vl_cap             = dev_lim->max_vl;
        mdev->limits.mtu_cap            = dev_lim->max_mtu;
        mdev->limits.gid_table_len      = dev_lim->max_gids;
        mdev->limits.pkey_table_len     = dev_lim->max_pkeys;
        mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
        mdev->limits.max_sg             = dev_lim->max_sg;
        mdev->limits.max_wqes           = dev_lim->max_qp_sz;
        mdev->limits.max_qp_init_rdma   = dev_lim->max_requester_per_qp;
        mdev->limits.reserved_qps       = dev_lim->reserved_qps;
        mdev->limits.max_srq_wqes       = dev_lim->max_srq_sz;
        mdev->limits.reserved_srqs      = dev_lim->reserved_srqs;
        mdev->limits.reserved_eecs      = dev_lim->reserved_eecs;
        mdev->limits.max_desc_sz        = dev_lim->max_desc_sz;
        mdev->limits.max_srq_sge        = mthca_max_srq_sge(mdev);
        /*
         * Subtract 1 from the limit because we need to allocate a
         * spare CQE so the HCA HW can tell the difference between an
         * empty CQ and a full CQ.
         */
        mdev->limits.max_cqes           = dev_lim->max_cq_sz - 1;
        mdev->limits.reserved_cqs       = dev_lim->reserved_cqs;
        mdev->limits.reserved_eqs       = dev_lim->reserved_eqs;
        mdev->limits.reserved_mtts      = dev_lim->reserved_mtts;
        mdev->limits.reserved_mrws      = dev_lim->reserved_mrws;
        mdev->limits.reserved_uars      = dev_lim->reserved_uars;
        mdev->limits.reserved_pds       = dev_lim->reserved_pds;
        mdev->limits.port_width_cap     = dev_lim->max_port_width;
        mdev->limits.page_size_cap      = ~(u32) (dev_lim->min_page_sz - 1);
        mdev->limits.flags              = dev_lim->flags;
        /*
         * For old FW that doesn't return static rate support, use a
         * value of 0x3 (only static rate values of 0 or 1 are handled),
         * except on Sinai, where even old FW can handle static rate
         * values of 2 and 3.
         */
        if (dev_lim->stat_rate_support)
                mdev->limits.stat_rate_support = dev_lim->stat_rate_support;
        else if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                mdev->limits.stat_rate_support = 0xf;
        else
                mdev->limits.stat_rate_support = 0x3;

        /* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
           May be doable since hardware supports it for SRQ.

           IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver.

           IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not
           supported by driver. */
        mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT |
                IB_DEVICE_SYS_IMAGE_GUID |
                IB_DEVICE_RC_RNR_NAK_GEN;

        if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR)
                mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;

        if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR)
                mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;

        if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI)
                mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI;

        if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG)
                mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;

        if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE)
                mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;

        if (dev_lim->flags & DEV_LIM_FLAG_SRQ)
                mdev->mthca_flags |= MTHCA_FLAG_SRQ;

        return 0;
}

static int mthca_init_tavor(struct mthca_dev *mdev)
{
        u8 status;
        int err;
        struct mthca_dev_lim        dev_lim;
        struct mthca_profile        profile;
        struct mthca_init_hca_param init_hca;

        err = mthca_SYS_EN(mdev, &status);
        if (err) {
                mthca_err(mdev, "SYS_EN command failed, aborting.\n");
                return err;
        }
        if (status) {
                mthca_err(mdev, "SYS_EN returned status 0x%02x, "
                          "aborting.\n", status);
                return -EINVAL;
        }

        err = mthca_QUERY_FW(mdev, &status);
        if (err) {
                mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
                goto err_disable;
        }
        if (status) {
                mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
                          "aborting.\n", status);
                err = -EINVAL;
                goto err_disable;
        }
        err = mthca_QUERY_DDR(mdev, &status);
        if (err) {
                mthca_err(mdev, "QUERY_DDR command failed, aborting.\n");
                goto err_disable;
        }
        if (status) {
                mthca_err(mdev, "QUERY_DDR returned status 0x%02x, "
                          "aborting.\n", status);
                err = -EINVAL;
                goto err_disable;
        }

        err = mthca_dev_lim(mdev, &dev_lim);
        if (err) {
                mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
                goto err_disable;
        }

        profile = default_profile;
        profile.num_uar   = dev_lim.uar_size / PAGE_SIZE;
        profile.uarc_size = 0;
        if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
                profile.num_srq = dev_lim.max_srqs;

        err = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
        if (err < 0)
                goto err_disable;

        err = mthca_INIT_HCA(mdev, &init_hca, &status);
        if (err) {
                mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
                goto err_disable;
        }
        if (status) {
                mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
                          "aborting.\n", status);
                err = -EINVAL;
                goto err_disable;
        }

        return 0;

err_disable:
        mthca_SYS_DIS(mdev, &status);

        return err;
}

static int mthca_load_fw(struct mthca_dev *mdev)
{
        u8 status;
        int err;

        /* FIXME: use HCA-attached memory for FW if present */

        mdev->fw.arbel.fw_icm =
                mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
                                GFP_HIGHUSER | __GFP_NOWARN);
        if (!mdev->fw.arbel.fw_icm) {
                mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
                return -ENOMEM;
        }

        err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status);
        if (err) {
                mthca_err(mdev, "MAP_FA command failed, aborting.\n");
                goto err_free;
        }
        if (status) {
                mthca_err(mdev, "MAP_FA returned status 0x%02x, aborting.\n", status);
                err = -EINVAL;
                goto err_free;
        }
        err = mthca_RUN_FW(mdev, &status);
        if (err) {
                mthca_err(mdev, "RUN_FW command failed, aborting.\n");
                goto err_unmap_fa;
        }
        if (status) {
                mthca_err(mdev, "RUN_FW returned status 0x%02x, aborting.\n", status);
                err = -EINVAL;
                goto err_unmap_fa;
        }

        return 0;

err_unmap_fa:
        mthca_UNMAP_FA(mdev, &status);

err_free:
        mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
        return err;
}

static int mthca_init_icm(struct mthca_dev *mdev,
                          struct mthca_dev_lim *dev_lim,
                          struct mthca_init_hca_param *init_hca,
                          u64 icm_size)
{
        u64 aux_pages;
        u8 status;
        int err;

        err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status);
        if (err) {
                mthca_err(mdev, "SET_ICM_SIZE command failed, aborting.\n");
                return err;
        }
        if (status) {
                mthca_err(mdev, "SET_ICM_SIZE returned status 0x%02x, "
                          "aborting.\n", status);
                return -EINVAL;
        }

        mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
                  (unsigned long long) icm_size >> 10,
                  (unsigned long long) aux_pages << 2);

        mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
                                                 GFP_HIGHUSER | __GFP_NOWARN);
        if (!mdev->fw.arbel.aux_icm) {
                mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
                return -ENOMEM;
        }

        err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status);
        if (err) {
                mthca_err(mdev, "MAP_ICM_AUX command failed, aborting.\n");
                goto err_free_aux;
        }
        if (status) {
                mthca_err(mdev, "MAP_ICM_AUX returned status 0x%02x, aborting.\n", status);
                err = -EINVAL;
                goto err_free_aux;
        }

        err = mthca_map_eq_icm(mdev, init_hca->eqc_base);
        if (err) {
                mthca_err(mdev, "Failed to map EQ context memory, aborting.\n");
                goto err_unmap_aux;
        }

        mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
                                                         MTHCA_MTT_SEG_SIZE,
                                                         mdev->limits.num_mtt_segs,
                                                         mdev->limits.reserved_mtts, 1);
        if (!mdev->mr_table.mtt_table) {
                mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
                err = -ENOMEM;
                goto err_unmap_eq;
        }

        mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
                                                         dev_lim->mpt_entry_sz,
                                                         mdev->limits.num_mpts,
                                                         mdev->limits.reserved_mrws, 1);
        if (!mdev->mr_table.mpt_table) {
                mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
                err = -ENOMEM;
                goto err_unmap_mtt;
        }

        mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
                                                        dev_lim->qpc_entry_sz,
                                                        mdev->limits.num_qps,
                                                        mdev->limits.reserved_qps, 0);
        if (!mdev->qp_table.qp_table) {
                mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
                err = -ENOMEM;
                goto err_unmap_mpt;
        }

        mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
                                                         dev_lim->eqpc_entry_sz,
                                                         mdev->limits.num_qps,
                                                         mdev->limits.reserved_qps, 0);
        if (!mdev->qp_table.eqp_table) {
                mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
                err = -ENOMEM;
                goto err_unmap_qp;
        }

        mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
                                                         MTHCA_RDB_ENTRY_SIZE,
                                                         mdev->limits.num_qps <<
                                                         mdev->qp_table.rdb_shift,
                                                         0, 0);
        if (!mdev->qp_table.rdb_table) {
                mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
                err = -ENOMEM;
                goto err_unmap_eqp;
        }

        mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
                                                     dev_lim->cqc_entry_sz,
                                                     mdev->limits.num_cqs,
                                                     mdev->limits.reserved_cqs, 0);
        if (!mdev->cq_table.table) {
                mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
                err = -ENOMEM;
                goto err_unmap_rdb;
        }

        if (mdev->mthca_flags & MTHCA_FLAG_SRQ) {
                mdev->srq_table.table =
                        mthca_alloc_icm_table(mdev, init_hca->srqc_base,
                                              dev_lim->srq_entry_sz,
                                              mdev->limits.num_srqs,
                                              mdev->limits.reserved_srqs, 0);
                if (!mdev->srq_table.table) {
                        mthca_err(mdev, "Failed to map SRQ context memory, "
                                  "aborting.\n");
                        err = -ENOMEM;
                        goto err_unmap_cq;
                }
        }

        /*
         * It's not strictly required, but for simplicity just map the
         * whole multicast group table now.  The table isn't very big
         * and it's a lot easier than trying to track ref counts.
         */
        mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base,
                                                      MTHCA_MGM_ENTRY_SIZE,
                                                      mdev->limits.num_mgms +
                                                      mdev->limits.num_amgms,
                                                      mdev->limits.num_mgms +
                                                      mdev->limits.num_amgms,
                                                      0);
        if (!mdev->mcg_table.table) {
                mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
                err = -ENOMEM;
                goto err_unmap_srq;
        }

        return 0;

err_unmap_srq:
        if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
                mthca_free_icm_table(mdev, mdev->srq_table.table);

err_unmap_cq:
        mthca_free_icm_table(mdev, mdev->cq_table.table);

err_unmap_rdb:
        mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);

err_unmap_eqp:
        mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);

err_unmap_qp:
        mthca_free_icm_table(mdev, mdev->qp_table.qp_table);

err_unmap_mpt:
        mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);

err_unmap_mtt:
        mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);

err_unmap_eq:
        mthca_unmap_eq_icm(mdev);

err_unmap_aux:
        mthca_UNMAP_ICM_AUX(mdev, &status);

err_free_aux:
        mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);

        return err;
}

static void mthca_free_icms(struct mthca_dev *mdev)
{
        u8 status;

        mthca_free_icm_table(mdev, mdev->mcg_table.table);
        if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
                mthca_free_icm_table(mdev, mdev->srq_table.table);
        mthca_free_icm_table(mdev, mdev->cq_table.table);
        mthca_free_icm_table(mdev, mdev->qp_table.rdb_table);
        mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
        mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
        mthca_free_icm_table(mdev, mdev->mr_table.mpt_table);
        mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
        mthca_unmap_eq_icm(mdev);

        mthca_UNMAP_ICM_AUX(mdev, &status);
        mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
}

static int mthca_init_arbel(struct mthca_dev *mdev)
{
        struct mthca_dev_lim        dev_lim;
        struct mthca_profile        profile;
        struct mthca_init_hca_param init_hca;
        u64 icm_size;
        u8 status;
        int err;

        err = mthca_QUERY_FW(mdev, &status);
        if (err) {
                mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
                return err;
        }
        if (status) {
                mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
                          "aborting.\n", status);
                return -EINVAL;
        }

        err = mthca_ENABLE_LAM(mdev, &status);
        if (err) {
                mthca_err(mdev, "ENABLE_LAM command failed, aborting.\n");
                return err;
        }
        if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) {
                mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
                mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
        } else if (status) {
                mthca_err(mdev, "ENABLE_LAM returned status 0x%02x, "
                          "aborting.\n", status);
                return -EINVAL;
        }

        err = mthca_load_fw(mdev);
        if (err) {
                mthca_err(mdev, "Failed to start FW, aborting.\n");
                goto err_disable;
        }

        err = mthca_dev_lim(mdev, &dev_lim);
        if (err) {
                mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
                goto err_stop_fw;
        }

        profile = default_profile;
        profile.num_uar  = dev_lim.uar_size / PAGE_SIZE;
        profile.num_udav = 0;
        if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
                profile.num_srq = dev_lim.max_srqs;

        icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca);
        if ((int) icm_size < 0) {
                err = icm_size;
                goto err_stop_fw;
        }

        err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size);
        if (err)
                goto err_stop_fw;

        err = mthca_INIT_HCA(mdev, &init_hca, &status);
        if (err) {
                mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
                goto err_free_icm;
        }
        if (status) {
                mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
                          "aborting.\n", status);
                err = -EINVAL;
                goto err_free_icm;
        }

        return 0;

err_free_icm:
        mthca_free_icms(mdev);

err_stop_fw:
        mthca_UNMAP_FA(mdev, &status);
        mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);

err_disable:
        if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
                mthca_DISABLE_LAM(mdev, &status);

        return err;
}

static void mthca_close_hca(struct mthca_dev *mdev)
{
        u8 status;

        mthca_CLOSE_HCA(mdev, 0, &status);

        if (mthca_is_memfree(mdev)) {
                mthca_free_icms(mdev);

                mthca_UNMAP_FA(mdev, &status);
                mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);

                if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
                        mthca_DISABLE_LAM(mdev, &status);
        } else
                mthca_SYS_DIS(mdev, &status);
}

static int mthca_init_hca(struct mthca_dev *mdev)
{
        u8 status;
        int err;
        struct mthca_adapter adapter;

        if (mthca_is_memfree(mdev))
                err = mthca_init_arbel(mdev);
        else
                err = mthca_init_tavor(mdev);

        if (err)
                return err;

        err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
        if (err) {
                mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
                goto err_close;
        }
        if (status) {
                mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
                          "aborting.\n", status);
                err = -EINVAL;
                goto err_close;
        }

        mdev->eq_table.inta_pin = adapter.inta_pin;
        mdev->rev_id            = adapter.revision_id;
        memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id);

        return 0;

err_close:
        mthca_close_hca(mdev);
        return err;
}

static int mthca_setup_hca(struct mthca_dev *dev)
{
        int err;
        u8 status;

        MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);

        err = mthca_init_uar_table(dev);
        if (err) {
                mthca_err(dev, "Failed to initialize "
                          "user access region table, aborting.\n");
                return err;
        }

        err = mthca_uar_alloc(dev, &dev->driver_uar);
        if (err) {
                mthca_err(dev, "Failed to allocate driver access region, "
                          "aborting.\n");
                goto err_uar_table_free;
        }

        dev->kar = ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
        if (!dev->kar) {
                mthca_err(dev, "Couldn't map kernel access region, "
                          "aborting.\n");
                err = -ENOMEM;
                goto err_uar_free;
        }

        err = mthca_init_pd_table(dev);
        if (err) {
                mthca_err(dev, "Failed to initialize "
                          "protection domain table, aborting.\n");
                goto err_kar_unmap;
        }

        err = mthca_init_mr_table(dev);
        if (err) {
                mthca_err(dev, "Failed to initialize "
                          "memory region table, aborting.\n");
                goto err_pd_table_free;
        }

        err = mthca_pd_alloc(dev, 1, &dev->driver_pd);
        if (err) {
                mthca_err(dev, "Failed to create driver PD, "
                          "aborting.\n");
                goto err_mr_table_free;
        }

        err = mthca_init_eq_table(dev);
        if (err) {
                mthca_err(dev, "Failed to initialize "
                          "event queue table, aborting.\n");
                goto err_pd_free;
        }

        err = mthca_cmd_use_events(dev);
        if (err) {
                mthca_err(dev, "Failed to switch to event-driven "
                          "firmware commands, aborting.\n");
                goto err_eq_table_free;
        }

        err = mthca_NOP(dev, &status);
        if (err || status) {
                mthca_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting.\n",
                          dev->mthca_flags & MTHCA_FLAG_MSI_X ?
                          dev->eq_table.eq[MTHCA_EQ_CMD].msi_x_vector :
                          dev->pdev->irq);
                if (dev->mthca_flags & (MTHCA_FLAG_MSI | MTHCA_FLAG_MSI_X))
                        mthca_err(dev, "Try again with MSI/MSI-X disabled.\n");
                else
                        mthca_err(dev, "BIOS or ACPI interrupt routing problem?\n");

                goto err_cmd_poll;
        }

        mthca_dbg(dev, "NOP command IRQ test passed\n");

        err = mthca_init_cq_table(dev);
        if (err) {
                mthca_err(dev, "Failed to initialize "
                          "completion queue table, aborting.\n");
                goto err_cmd_poll;
        }

        err = mthca_init_srq_table(dev);
        if (err) {
                mthca_err(dev, "Failed to initialize "
                          "shared receive queue table, aborting.\n");
                goto err_cq_table_free;
        }

        err = mthca_init_qp_table(dev);
        if (err) {
                mthca_err(dev, "Failed to initialize "
                          "queue pair table, aborting.\n");
                goto err_srq_table_free;
        }

        err = mthca_init_av_table(dev);
        if (err) {
                mthca_err(dev, "Failed to initialize "
                          "address vector table, aborting.\n");
                goto err_qp_table_free;
        }

        err = mthca_init_mcg_table(dev);
        if (err) {
                mthca_err(dev, "Failed to initialize "
                          "multicast group table, aborting.\n");
                goto err_av_table_free;
        }

        return 0;

err_av_table_free:
        mthca_cleanup_av_table(dev);

err_qp_table_free:
        mthca_cleanup_qp_table(dev);

err_srq_table_free:
        mthca_cleanup_srq_table(dev);

err_cq_table_free:
        mthca_cleanup_cq_table(dev);

err_cmd_poll:
        mthca_cmd_use_polling(dev);

err_eq_table_free:
        mthca_cleanup_eq_table(dev);

err_pd_free:
        mthca_pd_free(dev, &dev->driver_pd);

err_mr_table_free:
        mthca_cleanup_mr_table(dev);

err_pd_table_free:
        mthca_cleanup_pd_table(dev);

err_kar_unmap:
        iounmap(dev->kar);

err_uar_free:
        mthca_uar_free(dev, &dev->driver_uar);

err_uar_table_free:
        mthca_cleanup_uar_table(dev);
        return err;
}

static int mthca_request_regions(struct pci_dev *pdev, int ddr_hidden)
{
        int err;

        /*
         * We can't just use pci_request_regions() because the MSI-X
         * table is right in the middle of the first BAR.  If we did
         * pci_request_region and grab all of the first BAR, then
         * setting up MSI-X would fail, since the PCI core wants to do
         * request_mem_region on the MSI-X vector table.
         *
         * So just request what we need right now, and request any
         * other regions we need when setting up EQs.
         */
        if (!request_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
                                MTHCA_HCR_SIZE, DRV_NAME))
                return -EBUSY;

        err = pci_request_region(pdev, 2, DRV_NAME);
        if (err)
                goto err_bar2_failed;

        if (!ddr_hidden) {
                err = pci_request_region(pdev, 4, DRV_NAME);
                if (err)
                        goto err_bar4_failed;
        }

        return 0;

err_bar4_failed:
        pci_release_region(pdev, 2);

err_bar2_failed:
        release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
                           MTHCA_HCR_SIZE);

        return err;
}

static void mthca_release_regions(struct pci_dev *pdev,
                                  int ddr_hidden)
{
        if (!ddr_hidden)
                pci_release_region(pdev, 4);

        pci_release_region(pdev, 2);

        release_mem_region(pci_resource_start(pdev, 0) + MTHCA_HCR_BASE,
                           MTHCA_HCR_SIZE);
}

static int mthca_enable_msi_x(struct mthca_dev *mdev)
{
        struct msix_entry entries[3];
        int err;

        entries[0].entry = 0;
        entries[1].entry = 1;
        entries[2].entry = 2;

        err = pci_enable_msix(mdev->pdev, entries, ARRAY_SIZE(entries));
        if (err) {
                if (err > 0)
                        mthca_info(mdev, "Only %d MSI-X vectors available, "
                                   "not using MSI-X\n", err);
                return err;
        }

        mdev->eq_table.eq[MTHCA_EQ_COMP ].msi_x_vector = entries[0].vector;
        mdev->eq_table.eq[MTHCA_EQ_ASYNC].msi_x_vector = entries[1].vector;
        mdev->eq_table.eq[MTHCA_EQ_CMD  ].msi_x_vector = entries[2].vector;

        return 0;
}

/* Types of supported HCA */
enum {
        TAVOR,                  /* MT23108                        */
        ARBEL_COMPAT,           /* MT25208 in Tavor compat mode   */
        ARBEL_NATIVE,           /* MT25208 with extended features */
        SINAI                   /* MT25204 */
};

#define MTHCA_FW_VER(major, minor, subminor) \
        (((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor))

static struct {
        u64 latest_fw;
        u32 flags;
} mthca_hca_table[] = {
        [TAVOR]        = { .latest_fw = MTHCA_FW_VER(3, 4, 0),
                           .flags     = 0 },
        [ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 600),
                           .flags     = MTHCA_FLAG_PCIE },
        [ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 400),
                           .flags     = MTHCA_FLAG_MEMFREE |
                                        MTHCA_FLAG_PCIE },
        [SINAI]        = { .latest_fw = MTHCA_FW_VER(1, 1, 0),
                           .flags     = MTHCA_FLAG_MEMFREE |
                                        MTHCA_FLAG_PCIE    |
                                        MTHCA_FLAG_SINAI_OPT }
};

static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
{
        int ddr_hidden = 0;
        int err;
        struct mthca_dev *mdev;

        printk(KERN_INFO PFX "Initializing %s\n",
               pci_name(pdev));

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, "
                        "aborting.\n");
                return err;
        }

        /*
         * Check for BARs.  We expect 0: 1MB, 2: 8MB, 4: DDR (may not
         * be present)
         */
        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
            pci_resource_len(pdev, 0) != 1 << 20) {
                dev_err(&pdev->dev, "Missing DCS, aborting.\n");
                err = -ENODEV;
                goto err_disable_pdev;
        }
        if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Missing UAR, aborting.\n");
                err = -ENODEV;
                goto err_disable_pdev;
        }
        if (!(pci_resource_flags(pdev, 4) & IORESOURCE_MEM))
                ddr_hidden = 1;

        err = mthca_request_regions(pdev, ddr_hidden);
        if (err) {
                dev_err(&pdev->dev, "Cannot obtain PCI resources, "
                        "aborting.\n");
                goto err_disable_pdev;
        }

        pci_set_master(pdev);

        err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
                err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (err) {
                        dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
                        goto err_free_res;
                }
        }
        err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
                         "consistent PCI DMA mask.\n");
                err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                if (err) {
                        dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
                                "aborting.\n");
                        goto err_free_res;
                }
        }

        mdev = (struct mthca_dev *) ib_alloc_device(sizeof *mdev);
        if (!mdev) {
                dev_err(&pdev->dev, "Device struct alloc failed, "
                        "aborting.\n");
                err = -ENOMEM;
                goto err_free_res;
        }

        mdev->pdev = pdev;

        mdev->mthca_flags = mthca_hca_table[hca_type].flags;
        if (ddr_hidden)
                mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN;

        /*
         * Now reset the HCA before we touch the PCI capabilities or
         * attempt a firmware command, since a boot ROM may have left
         * the HCA in an undefined state.
         */
        err = mthca_reset(mdev);
        if (err) {
                mthca_err(mdev, "Failed to reset HCA, aborting.\n");
                goto err_free_dev;
        }

        if (msi_x && !mthca_enable_msi_x(mdev))
                mdev->mthca_flags |= MTHCA_FLAG_MSI_X;
        if (msi && !(mdev->mthca_flags & MTHCA_FLAG_MSI_X) &&
            !pci_enable_msi(pdev))
                mdev->mthca_flags |= MTHCA_FLAG_MSI;

        if (mthca_cmd_init(mdev)) {
                mthca_err(mdev, "Failed to init command interface, aborting.\n");
                goto err_free_dev;
        }

        err = mthca_tune_pci(mdev);
        if (err)
                goto err_cmd;

        err = mthca_init_hca(mdev);
        if (err)
                goto err_cmd;

        if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
                mthca_warn(mdev, "HCA FW version %d.%d.%d is old (%d.%d.%d is current).\n",
                           (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
                           (int) (mdev->fw_ver & 0xffff),
                           (int) (mthca_hca_table[hca_type].latest_fw >> 32),
                           (int) (mthca_hca_table[hca_type].latest_fw >> 16) & 0xffff,
                           (int) (mthca_hca_table[hca_type].latest_fw & 0xffff));
                mthca_warn(mdev, "If you have problems, try updating your HCA FW.\n");
        }

        err = mthca_setup_hca(mdev);
        if (err)
                goto err_close;

        err = mthca_register_device(mdev);
        if (err)
                goto err_cleanup;

        err = mthca_create_agents(mdev);
        if (err)
                goto err_unregister;

        pci_set_drvdata(pdev, mdev);
        mdev->hca_type = hca_type;

        return 0;

err_unregister:
        mthca_unregister_device(mdev);

err_cleanup:
        mthca_cleanup_mcg_table(mdev);
        mthca_cleanup_av_table(mdev);
        mthca_cleanup_qp_table(mdev);
        mthca_cleanup_srq_table(mdev);
        mthca_cleanup_cq_table(mdev);
        mthca_cmd_use_polling(mdev);
        mthca_cleanup_eq_table(mdev);

        mthca_pd_free(mdev, &mdev->driver_pd);

        mthca_cleanup_mr_table(mdev);
        mthca_cleanup_pd_table(mdev);
        mthca_cleanup_uar_table(mdev);

err_close:
        mthca_close_hca(mdev);

err_cmd:
        mthca_cmd_cleanup(mdev);

err_free_dev:
        if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
                pci_disable_msix(pdev);
        if (mdev->mthca_flags & MTHCA_FLAG_MSI)
                pci_disable_msi(pdev);

        ib_dealloc_device(&mdev->ib_dev);

err_free_res:
        mthca_release_regions(pdev, ddr_hidden);

err_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void __mthca_remove_one(struct pci_dev *pdev)
{
        struct mthca_dev *mdev = pci_get_drvdata(pdev);
        u8 status;
        int p;

        if (mdev) {
                mthca_free_agents(mdev);
                mthca_unregister_device(mdev);

                for (p = 1; p <= mdev->limits.num_ports; ++p)
                        mthca_CLOSE_IB(mdev, p, &status);

                mthca_cleanup_mcg_table(mdev);
                mthca_cleanup_av_table(mdev);
                mthca_cleanup_qp_table(mdev);
                mthca_cleanup_srq_table(mdev);
                mthca_cleanup_cq_table(mdev);
                mthca_cmd_use_polling(mdev);
                mthca_cleanup_eq_table(mdev);

                mthca_pd_free(mdev, &mdev->driver_pd);

                mthca_cleanup_mr_table(mdev);
                mthca_cleanup_pd_table(mdev);

                iounmap(mdev->kar);
                mthca_uar_free(mdev, &mdev->driver_uar);
                mthca_cleanup_uar_table(mdev);
                mthca_close_hca(mdev);
                mthca_cmd_cleanup(mdev);

                if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
                        pci_disable_msix(pdev);
                if (mdev->mthca_flags & MTHCA_FLAG_MSI)
                        pci_disable_msi(pdev);

                ib_dealloc_device(&mdev->ib_dev);
                mthca_release_regions(pdev, mdev->mthca_flags &
                                      MTHCA_FLAG_DDR_HIDDEN);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

int __mthca_restart_one(struct pci_dev *pdev)
{
        struct mthca_dev *mdev;

        mdev = pci_get_drvdata(pdev);
        if (!mdev)
                return -ENODEV;
        __mthca_remove_one(pdev);
        return __mthca_init_one(pdev, mdev->hca_type);
}

static int __devinit mthca_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *id)
{
        static int mthca_version_printed = 0;
        int ret;

        mutex_lock(&mthca_device_mutex);

        if (!mthca_version_printed) {
                printk(KERN_INFO "%s", mthca_version);
                ++mthca_version_printed;
        }

        if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
                printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
                       pci_name(pdev), id->driver_data);
                mutex_unlock(&mthca_device_mutex);
                return -ENODEV;
        }

        ret = __mthca_init_one(pdev, id->driver_data);

        mutex_unlock(&mthca_device_mutex);

        return ret;
}

static void __devexit mthca_remove_one(struct pci_dev *pdev)
{
        mutex_lock(&mthca_device_mutex);
        __mthca_remove_one(pdev);
        mutex_unlock(&mthca_device_mutex);
}

static struct pci_device_id mthca_pci_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR),
          .driver_data = TAVOR },
        { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_TAVOR),
          .driver_data = TAVOR },
        { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
          .driver_data = ARBEL_COMPAT },
        { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT),
          .driver_data = ARBEL_COMPAT },
        { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_ARBEL),
          .driver_data = ARBEL_NATIVE },
        { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_ARBEL),
          .driver_data = ARBEL_NATIVE },
        { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI),
          .driver_data = SINAI },
        { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI),
          .driver_data = SINAI },
        { PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
          .driver_data = SINAI },
        { PCI_DEVICE(PCI_VENDOR_ID_TOPSPIN, PCI_DEVICE_ID_MELLANOX_SINAI_OLD),
          .driver_data = SINAI },
        { 0, }
};

MODULE_DEVICE_TABLE(pci, mthca_pci_table);

static struct pci_driver mthca_driver = {
        .name           = DRV_NAME,
        .id_table       = mthca_pci_table,
        .probe          = mthca_init_one,
        .remove         = __devexit_p(mthca_remove_one)
};

static int __init mthca_init(void)
{
        int ret;

        mutex_init(&mthca_device_mutex);
        ret = mthca_catas_init();
        if (ret)
                return ret;

        ret = pci_register_driver(&mthca_driver);
        if (ret < 0) {
                mthca_catas_cleanup();
                return ret;
        }

        return 0;
}

static void __exit mthca_cleanup(void)
{
        pci_unregister_driver(&mthca_driver);
        mthca_catas_cleanup();
}

module_init(mthca_init);
module_exit(mthca_cleanup);