Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/agpgart
[pandora-kernel.git] / drivers / net / mlx4 / main.c
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4  * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
5  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/errno.h>
39 #include <linux/pci.h>
40 #include <linux/dma-mapping.h>
41
42 #include <linux/mlx4/device.h>
43 #include <linux/mlx4/doorbell.h>
44
45 #include "mlx4.h"
46 #include "fw.h"
47 #include "icm.h"
48
49 MODULE_AUTHOR("Roland Dreier");
50 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
51 MODULE_LICENSE("Dual BSD/GPL");
52 MODULE_VERSION(DRV_VERSION);
53
54 #ifdef CONFIG_MLX4_DEBUG
55
56 int mlx4_debug_level = 0;
57 module_param_named(debug_level, mlx4_debug_level, int, 0644);
58 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
59
60 #endif /* CONFIG_MLX4_DEBUG */
61
62 #ifdef CONFIG_PCI_MSI
63
64 static int msi_x;
65 module_param(msi_x, int, 0444);
66 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
67
68 #else /* CONFIG_PCI_MSI */
69
70 #define msi_x (0)
71
72 #endif /* CONFIG_PCI_MSI */
73
74 static const char mlx4_version[] __devinitdata =
75         DRV_NAME ": Mellanox ConnectX core driver v"
76         DRV_VERSION " (" DRV_RELDATE ")\n";
77
/*
 * Default resource profile: how many of each HCA object type to size
 * the ICM (InfiniHost Context Memory) layout for.  Fed to
 * mlx4_make_profile() during HCA init.
 */
static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 16,	/* queue pairs */
	.num_srq	= 1 << 16,	/* shared receive queues */
	.rdmarc_per_qp	= 4,		/* RDMA read/atomic responder resources per QP */
	.num_cq		= 1 << 16,	/* completion queues */
	.num_mcg	= 1 << 13,	/* multicast groups */
	.num_mpt	= 1 << 17,	/* memory protection table entries */
	.num_mtt	= 1 << 20,	/* memory translation table segments */
};
87
/*
 * Query the firmware for device capabilities, sanity-check the few
 * limits the driver cannot work around, and copy the rest into
 * dev->caps for use by the rest of the driver.
 *
 * Returns 0 on success, a negative errno if the QUERY_DEV_CAP command
 * fails or the device's limits are incompatible with this kernel.
 */
static int __devinit mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	/* The HCA's minimum supported page size must fit in a kernel page. */
	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	/* The UAR space the device reports must fit inside BAR 2. */
	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	/* Copy the firmware-reported limits into the driver's caps struct. */
	dev->caps.num_ports          = dev_cap->num_ports;
	dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.vl_cap             = dev_cap->max_vl;
	dev->caps.mtu_cap            = dev_cap->max_mtu;
	dev->caps.gid_table_len      = dev_cap->max_gids;
	dev->caps.pkey_table_len     = dev_cap->max_pkeys;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size        = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg          = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg          = dev_cap->max_rq_sg;
	dev->caps.max_wqes           = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.reserved_qps       = dev_cap->reserved_qps;
	dev->caps.max_srq_wqes       = dev_cap->max_srq_sz;
	/* One SRQ scatter entry is consumed internally, hence the -1. */
	dev->caps.max_srq_sge        = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs      = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	dev->caps.num_qp_per_mgm     = MLX4_QP_PER_MGM;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs       = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts      = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws      = dev_cap->reserved_mrws;
	dev->caps.reserved_uars      = dev_cap->reserved_uars;
	dev->caps.reserved_pds       = dev_cap->reserved_pds;
	dev->caps.port_width_cap     = dev_cap->max_port_width;
	dev->caps.mtt_entry_sz       = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
	dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags              = dev_cap->flags;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;

	return 0;
}
159
160 static int __devinit mlx4_load_fw(struct mlx4_dev *dev)
161 {
162         struct mlx4_priv *priv = mlx4_priv(dev);
163         int err;
164
165         priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
166                                          GFP_HIGHUSER | __GFP_NOWARN);
167         if (!priv->fw.fw_icm) {
168                 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
169                 return -ENOMEM;
170         }
171
172         err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
173         if (err) {
174                 mlx4_err(dev, "MAP_FA command failed, aborting.\n");
175                 goto err_free;
176         }
177
178         err = mlx4_RUN_FW(dev);
179         if (err) {
180                 mlx4_err(dev, "RUN_FW command failed, aborting.\n");
181                 goto err_unmap_fa;
182         }
183
184         return 0;
185
186 err_unmap_fa:
187         mlx4_UNMAP_FA(dev);
188
189 err_free:
190         mlx4_free_icm(dev, priv->fw.fw_icm);
191         return err;
192 }
193
/*
 * Initialize the four cMPT (context memory protection table) ICM
 * tables — one each for QPs, SRQs, CQs and EQs.  Each table lives at
 * cmpt_base plus a type-dependent offset; on failure, the tables
 * already initialized are cleaned up in reverse order.
 *
 * Returns 0 on success or the errno from the first failing
 * mlx4_init_icm_table() call.
 */
static int __devinit mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
					  int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0);
	if (err)
		goto err_srq;

	/*
	 * The EQ table is sized for all EQs the driver uses plus the
	 * firmware-reserved ones; all entries are "reserved" since EQs
	 * are never allocated dynamically.
	 */
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz,
				  roundup_pow_of_two(MLX4_NUM_EQ +
						     dev->caps.reserved_eqs),
				  MLX4_NUM_EQ + dev->caps.reserved_eqs, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}
252
/*
 * Tell the firmware how much ICM (context memory) we will give it,
 * allocate and map the auxiliary ICM area, then initialize every
 * per-object ICM table (cMPTs, EQs, MTTs, dMPTs, QP-related tables,
 * CQs, SRQs, MCGs) at the addresses chosen by mlx4_make_profile().
 *
 * Each step that fails unwinds all previous steps in reverse order
 * via the goto ladder at the bottom.  Returns 0 or a negative errno.
 */
static int __devinit mlx4_init_icm(struct mlx4_dev *dev,
				   struct mlx4_dev_cap *dev_cap,
				   struct mlx4_init_hca_param *init_hca,
				   u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int err;

	/* Ask FW how many aux ICM pages this much context memory needs. */
	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtt_segs,
				  dev->caps.reserved_mtts, 1);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	/* RDMARC entries are per-QP, scaled by the rdmarc shift. */
	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

/* Unwind in strict reverse order of the setup above. */
err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_unmap_eq_icm(dev);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm);

	return err;
}
435
/*
 * Tear down everything mlx4_init_icm() set up, in exact reverse
 * order of initialization, then unmap and free the aux ICM area.
 */
static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
	mlx4_unmap_eq_icm(dev);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm);
}
458
459 static void mlx4_close_hca(struct mlx4_dev *dev)
460 {
461         mlx4_CLOSE_HCA(dev, 0);
462         mlx4_free_icms(dev);
463         mlx4_UNMAP_FA(dev);
464         mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm);
465 }
466
/*
 * Bring the HCA firmware up: QUERY_FW, load/start the firmware,
 * query device caps, build the resource profile, map all ICM and
 * issue INIT_HCA, then read back adapter identity via QUERY_ADAPTER.
 *
 * Returns 0 on success or a negative errno, with all partially
 * completed steps undone.
 */
static int __devinit mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv	  *priv = mlx4_priv(dev);
	struct mlx4_adapter	   adapter;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_profile	   profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	err = mlx4_QUERY_FW(dev);
	if (err) {
		mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
		return err;
	}

	err = mlx4_load_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to start FW, aborting.\n");
		return err;
	}

	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		goto err_stop_fw;
	}

	profile = default_profile;

	/*
	 * mlx4_make_profile() returns the total ICM size needed, or a
	 * negative errno encoded in the u64 on failure.
	 */
	icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
	if ((long long) icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	init_hca.log_uar_sz = ilog2(dev->caps.num_uars);

	err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mlx4_INIT_HCA(dev, &init_hca);
	if (err) {
		mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
		goto err_free_icm;
	}

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	priv->rev_id		= adapter.revision_id;
	memcpy(priv->board_id, adapter.board_id, sizeof priv->board_id);

	return 0;

err_close:
	/* mlx4_close_hca() also frees ICM and unmaps/frees the FW area. */
	mlx4_close_hca(dev);

err_free_icm:
	mlx4_free_icms(dev);

err_stop_fw:
	mlx4_UNMAP_FA(dev);
	mlx4_free_icm(dev, priv->fw.fw_icm);

	return err;
}
539
/*
 * Set up all driver-side resource tables on an initialized HCA:
 * UARs, PDs, MRs, EQs (with an IRQ sanity test via a NOP command),
 * CQs, SRQs, QPs and multicast groups.  Each failure unwinds the
 * steps already completed in reverse order.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	/* Map the kernel access region (doorbell page) for the driver. */
	priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	/* Map the catastrophic-error buffer so FW faults can be reported. */
	mlx4_map_catas_buf(dev);

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_catas_buf;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	/* NOP generates an interrupt; proves the async EQ's IRQ works. */
	err = mlx4_NOP(dev);
	if (err) {
		mlx4_err(dev, "NOP command failed to generate interrupt "
			 "(IRQ %d), aborting.\n",
			 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
		if (dev->flags & MLX4_FLAG_MSI_X)
			mlx4_err(dev, "Try again with MSI-X disabled.\n");
		else
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_mcg_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "multicast group table, aborting.\n");
		goto err_qp_table_free;
	}

	return 0;

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_catas_buf:
	mlx4_unmap_catas_buf(dev);
	mlx4_cleanup_mr_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
674
675 static void __devinit mlx4_enable_msi_x(struct mlx4_dev *dev)
676 {
677         struct mlx4_priv *priv = mlx4_priv(dev);
678         struct msix_entry entries[MLX4_NUM_EQ];
679         int err;
680         int i;
681
682         if (msi_x) {
683                 for (i = 0; i < MLX4_NUM_EQ; ++i)
684                         entries[i].entry = i;
685
686                 err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries));
687                 if (err) {
688                         if (err > 0)
689                                 mlx4_info(dev, "Only %d MSI-X vectors available, "
690                                           "not using MSI-X\n", err);
691                         goto no_msi;
692                 }
693
694                 for (i = 0; i < MLX4_NUM_EQ; ++i)
695                         priv->eq_table.eq[i].irq = entries[i].vector;
696
697                 dev->flags |= MLX4_FLAG_MSI_X;
698                 return;
699         }
700
701 no_msi:
702         for (i = 0; i < MLX4_NUM_EQ; ++i)
703                 priv->eq_table.eq[i].irq = dev->pdev->irq;
704 }
705
706 static int __devinit mlx4_init_one(struct pci_dev *pdev,
707                                    const struct pci_device_id *id)
708 {
709         static int mlx4_version_printed;
710         struct mlx4_priv *priv;
711         struct mlx4_dev *dev;
712         int err;
713
714         if (!mlx4_version_printed) {
715                 printk(KERN_INFO "%s", mlx4_version);
716                 ++mlx4_version_printed;
717         }
718
719         printk(KERN_INFO PFX "Initializing %s\n",
720                pci_name(pdev));
721
722         err = pci_enable_device(pdev);
723         if (err) {
724                 dev_err(&pdev->dev, "Cannot enable PCI device, "
725                         "aborting.\n");
726                 return err;
727         }
728
729         /*
730          * Check for BARs.  We expect 0: 1MB, 2: 8MB, 4: DDR (may not
731          * be present)
732          */
733         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
734             pci_resource_len(pdev, 0) != 1 << 20) {
735                 dev_err(&pdev->dev, "Missing DCS, aborting.\n");
736                 err = -ENODEV;
737                 goto err_disable_pdev;
738         }
739         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
740                 dev_err(&pdev->dev, "Missing UAR, aborting.\n");
741                 err = -ENODEV;
742                 goto err_disable_pdev;
743         }
744
745         err = pci_request_region(pdev, 0, DRV_NAME);
746         if (err) {
747                 dev_err(&pdev->dev, "Cannot request control region, aborting.\n");
748                 goto err_disable_pdev;
749         }
750
751         err = pci_request_region(pdev, 2, DRV_NAME);
752         if (err) {
753                 dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
754                 goto err_release_bar0;
755         }
756
757         pci_set_master(pdev);
758
759         err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
760         if (err) {
761                 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
762                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
763                 if (err) {
764                         dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
765                         goto err_release_bar2;
766                 }
767         }
768         err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
769         if (err) {
770                 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
771                          "consistent PCI DMA mask.\n");
772                 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
773                 if (err) {
774                         dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
775                                 "aborting.\n");
776                         goto err_release_bar2;
777                 }
778         }
779
780         priv = kzalloc(sizeof *priv, GFP_KERNEL);
781         if (!priv) {
782                 dev_err(&pdev->dev, "Device struct alloc failed, "
783                         "aborting.\n");
784                 err = -ENOMEM;
785                 goto err_release_bar2;
786         }
787
788         dev       = &priv->dev;
789         dev->pdev = pdev;
790
791         /*
792          * Now reset the HCA before we touch the PCI capabilities or
793          * attempt a firmware command, since a boot ROM may have left
794          * the HCA in an undefined state.
795          */
796         err = mlx4_reset(dev);
797         if (err) {
798                 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
799                 goto err_free_dev;
800         }
801
802         mlx4_enable_msi_x(dev);
803
804         if (mlx4_cmd_init(dev)) {
805                 mlx4_err(dev, "Failed to init command interface, aborting.\n");
806                 goto err_free_dev;
807         }
808
809         err = mlx4_init_hca(dev);
810         if (err)
811                 goto err_cmd;
812
813         err = mlx4_setup_hca(dev);
814         if (err)
815                 goto err_close;
816
817         err = mlx4_register_device(dev);
818         if (err)
819                 goto err_cleanup;
820
821         pci_set_drvdata(pdev, dev);
822
823         return 0;
824
825 err_cleanup:
826         mlx4_cleanup_mcg_table(dev);
827         mlx4_cleanup_qp_table(dev);
828         mlx4_cleanup_srq_table(dev);
829         mlx4_cleanup_cq_table(dev);
830         mlx4_cmd_use_polling(dev);
831         mlx4_cleanup_eq_table(dev);
832
833         mlx4_unmap_catas_buf(dev);
834
835         mlx4_cleanup_mr_table(dev);
836         mlx4_cleanup_pd_table(dev);
837         mlx4_cleanup_uar_table(dev);
838
839 err_close:
840         mlx4_close_hca(dev);
841
842 err_cmd:
843         mlx4_cmd_cleanup(dev);
844
845 err_free_dev:
846         if (dev->flags & MLX4_FLAG_MSI_X)
847                 pci_disable_msix(pdev);
848
849         kfree(priv);
850
851 err_release_bar2:
852         pci_release_region(pdev, 2);
853
854 err_release_bar0:
855         pci_release_region(pdev, 0);
856
857 err_disable_pdev:
858         pci_disable_device(pdev);
859         pci_set_drvdata(pdev, NULL);
860         return err;
861 }
862
863 static void __devexit mlx4_remove_one(struct pci_dev *pdev)
864 {
865         struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
866         struct mlx4_priv *priv = mlx4_priv(dev);
867         int p;
868
869         if (dev) {
870                 mlx4_unregister_device(dev);
871
872                 for (p = 1; p <= dev->caps.num_ports; ++p)
873                         mlx4_CLOSE_PORT(dev, p);
874
875                 mlx4_cleanup_mcg_table(dev);
876                 mlx4_cleanup_qp_table(dev);
877                 mlx4_cleanup_srq_table(dev);
878                 mlx4_cleanup_cq_table(dev);
879                 mlx4_cmd_use_polling(dev);
880                 mlx4_cleanup_eq_table(dev);
881
882                 mlx4_unmap_catas_buf(dev);
883
884                 mlx4_cleanup_mr_table(dev);
885                 mlx4_cleanup_pd_table(dev);
886
887                 iounmap(priv->kar);
888                 mlx4_uar_free(dev, &priv->driver_uar);
889                 mlx4_cleanup_uar_table(dev);
890                 mlx4_close_hca(dev);
891                 mlx4_cmd_cleanup(dev);
892
893                 if (dev->flags & MLX4_FLAG_MSI_X)
894                         pci_disable_msix(pdev);
895
896                 kfree(priv);
897                 pci_release_region(pdev, 2);
898                 pci_release_region(pdev, 0);
899                 pci_disable_device(pdev);
900                 pci_set_drvdata(pdev, NULL);
901         }
902 }
903
/* PCI IDs of the ConnectX HCAs this driver binds to. */
static struct pci_device_id mlx4_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
	{ 0, }				   /* terminator */
};
910
911 MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
912
/* PCI driver descriptor registered by mlx4_init(). */
static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one)
};
919
920 static int __init mlx4_init(void)
921 {
922         int ret;
923
924         ret = pci_register_driver(&mlx4_driver);
925         return ret < 0 ? ret : 0;
926 }
927
/* Module exit: unregister the PCI driver (triggers remove on bound devices). */
static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
}
932
933 module_init(mlx4_init);
934 module_exit(mlx4_cleanup);