/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */
#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */
static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.rdmarc_per_qp	= 1 << 4,
static int log_num_mac = 2;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");

static int use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
		  "(0/1, default 0)");

static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
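/*
 * Validate a requested port-type configuration: mixing IB and Ethernet
 * ports requires the DPDP capability, an Ethernet port may not be
 * directly followed by an IB port, and every requested type must be
 * supported by the HCA on that port.
 */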
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] != port_type[i + 1]) {
			if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
				mlx4_err(dev, "Only same port types supported "
					 "on this HCA, aborting.\n");
			if (port_type[i] == MLX4_PORT_TYPE_ETH &&
			    port_type[i + 1] == MLX4_PORT_TYPE_IB)

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				 "supported on this HCA\n", i + 1);
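/*
 * Recompute dev->caps.port_mask: bit (i - 1) is set for every port i
 * that is currently configured as InfiniBand.
 */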
static void mlx4_set_port_mask(struct mlx4_dev *dev)
	dev->caps.port_mask = 0;
	for (i = 1; i <= dev->caps.num_ports; ++i)
		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
			dev->caps.port_mask |= 1 << (i - 1);
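/*
 * Query device capabilities from firmware and translate them into the
 * dev->caps fields used throughout the driver, applying the module
 * parameter limits (log_num_mac, log_num_vlan, use_prio,
 * log_mtts_per_seg) and sanity-checking the minimum page size, port
 * count and UAR BAR size.
 */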
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 dev_cap->num_ports, MLX4_MAX_PORTS);
	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 (unsigned long long) pci_resource_len(dev->pdev, 2));

	dev->caps.num_ports = dev_cap->num_ports;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i] = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
		dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i] = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.trans_type[i] = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i] = dev_cap->wavelength[i];
		dev->caps.trans_code[i] = dev_cap->trans_code[i];

	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
	dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
					       dev->caps.mtts_per_seg);
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;
	dev->caps.reserved_uars = dev_cap->reserved_uars;
	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;

	dev->caps.log_num_macs = log_num_mac;
	dev->caps.log_num_vlans = log_num_vlan;
	dev->caps.log_num_prios = use_prio ? 3 : 0;

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
			dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
		dev->caps.possible_type[i] = dev->caps.port_type[i];
		mlx4_priv(dev)->sense.sense_allowed[i] =
			dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too large "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too large "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
	mlx4_set_port_mask(dev);

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1]) {
			dev->caps.port_type[port + 1] = port_types[port];

		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			err = mlx4_SET_PORT(dev, port);
				mlx4_err(dev, "Failed to set port %d, "
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
	struct mlx4_dev *mdev = info->dev;
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
		sprintf(buf, "%s\n", type);

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
			mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
				       "Set only 'eth' or 'ib' for both ports "
				       "(should be the same)\n");

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
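/*
 * Allocate ICM memory for the firmware image, map it with MAP_FA and
 * start the firmware with RUN_FW.
 */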
static int mlx4_load_fw(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");

	err = mlx4_RUN_FW(dev);
		mlx4_err(dev, "RUN_FW command failed, aborting.\n");

	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
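/*
 * Map the cMPT (context memory protection table) regions for QPs,
 * SRQs, CQs and EQs, each at its own typed offset above cmpt_base.
 */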
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
	struct mlx4_priv *priv = mlx4_priv(dev);

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);

	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);

	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
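/*
 * Size the ICM with SET_ICM_SIZE, allocate and map the auxiliary ICM
 * area, then map the per-resource context tables (cMPT, EQ, MTT, dMPT,
 * QP/AUXC/ALTC/RDMARC, CQ, SRQ and MCG) that the HCA needs before
 * INIT_HCA can be issued.
 */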
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
	struct mlx4_priv *priv = mlx4_priv(dev);

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");

	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  dev->caps.num_eqs, dev->caps.num_eqs,
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtt_segs,
				  dev->caps.reserved_mtts, 1, 0);
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.reserved_mrws, 1, 1);
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  dev_cap->qpc_entry_sz,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  dev_cap->aux_entry_sz,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  dev_cap->altc_entry_sz,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  dev_cap->cqc_entry_sz,
				  dev->caps.reserved_cqs, 0, 0);
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  dev_cap->srq_entry_sz,
				  dev->caps.reserved_srqs, 0, 0);
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now. The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");

	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
static void mlx4_free_icms(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
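/*
 * Create a write-combining mapping for the BlueFlame area, which sits
 * in BAR 2 above the UAR pages.
 */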
static int map_bf_area(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t bf_start;
	resource_size_t bf_len;

	bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
	bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
	priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
	if (!priv->bf_mapping)
static void unmap_bf_area(struct mlx4_dev *dev)
	if (mlx4_priv(dev)->bf_mapping)
		io_mapping_free(mlx4_priv(dev)->bf_mapping);

static void mlx4_close_hca(struct mlx4_dev *dev)
	mlx4_CLOSE_HCA(dev, 0);
	mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
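/*
 * Bring the HCA up: query and load firmware, query device
 * capabilities, build the resource profile, map ICM and issue
 * INIT_HCA, then read back adapter information (INTA pin, board ID).
 */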
static int mlx4_init_hca(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_adapter adapter;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_mod_stat_cfg mlx4_cfg;
	struct mlx4_profile profile;
	struct mlx4_init_hca_param init_hca;

	err = mlx4_QUERY_FW(dev);
			mlx4_info(dev, "non-primary physical function, skipping.\n");
			mlx4_err(dev, "QUERY_FW command failed, aborting.\n");

	err = mlx4_load_fw(dev);
		mlx4_err(dev, "Failed to start FW, aborting.\n");

	mlx4_cfg.log_pg_sz_m = 1;
	mlx4_cfg.log_pg_sz = 0;
	err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
		mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

	err = mlx4_dev_cap(dev, &dev_cap);
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");

	profile = default_profile;

	icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
	if ((long long) icm_size < 0) {

	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	init_hca.log_uar_sz = ilog2(dev->caps.num_uars);

	err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);

	err = mlx4_INIT_HCA(dev, &init_hca);
		mlx4_err(dev, "INIT_HCA command failed, aborting.\n");

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	mlx4_CLOSE_HCA(dev, 0);
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
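/*
 * Initialize the bitmap used to allocate counter indices, provided the
 * device reports the counters capability.
 */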
static int mlx4_init_counters_table(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))

	nent = dev->caps.max_counters;
	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);

static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);

int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))

	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);

void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
EXPORT_SYMBOL_GPL(mlx4_counter_free);
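/*
 * Set up all software resource tables (UAR, PD, MR, EQ, CQ, SRQ, QP,
 * MCG, counters), switch firmware commands to event mode, verify
 * interrupt delivery with a NOP command and configure each port.
 */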
static int mlx4_setup_hca(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
		mlx4_err(dev, "Failed to allocate driver access region, "
		goto err_uar_table_free;

	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
		mlx4_err(dev, "Couldn't map kernel access region, "

	err = mlx4_init_pd_table(dev);
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");

	err = mlx4_init_mr_table(dev);
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_pd_table_free;

	err = mlx4_init_eq_table(dev);
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;

	err = mlx4_cmd_use_events(dev);
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt (IRQ %d).\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");

	err = mlx4_init_srq_table(dev);
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;

	err = mlx4_init_qp_table(dev);
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;

	err = mlx4_init_mcg_table(dev);
		mlx4_err(dev, "Failed to initialize "
			 "multicast group table, aborting.\n");
		goto err_qp_table_free;

	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
		goto err_counters_table_free;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		enum mlx4_port_type port_type = 0;
		mlx4_SENSE_PORT(dev, port, &port_type);
			dev->caps.port_type[port] = port_type;
		ib_port_default_caps = 0;
		err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
			mlx4_warn(dev, "failed to get port %d default "
				  "ib capabilities (%d). Continuing with "
				  "caps = 0\n", port, err);
		dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
		err = mlx4_SET_PORT(dev, port);
			mlx4_err(dev, "Failed to set port %d, aborting\n",
			goto err_mcg_table_free;

	mlx4_set_port_mask(dev);

	mlx4_cleanup_mcg_table(dev);

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_uar_free(dev, &priv->driver_uar);
	mlx4_cleanup_uar_table(dev);
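/*
 * Request MSI-X vectors (roughly one completion vector per online CPU
 * per port plus the legacy set), shrinking the request if fewer
 * vectors are available; on failure fall back to a single shared
 * vector on the legacy PCI interrupt.
 */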
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int nreq = min_t(int, dev->caps.num_ports *
			 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
				+ MSIX_LEGACY_SZ, MAX_MSIX);

		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

		err = pci_enable_msix(dev->pdev, entries, nreq);
			/* Try again if at least 2 vectors are available */
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);

		    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/* Working in legacy mode, all EQs shared */
			dev->caps.comp_pool = 0;
			dev->caps.num_comp_vectors = nreq - 1;
			dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;

		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool = 0;

	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];

	mlx4_init_mac_table(dev, &info->mac_table);
	mlx4_init_vlan_table(dev, &info->vlan_table);

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
	info->port_attr.show = show_port_type;
	info->port_attr.store = set_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
		mlx4_err(dev, "Failed to create file for port %d\n", port);

static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
	device_remove_file(&info->dev->pdev->dev, &info->port_attr);
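/*
 * Allocate the per-port steering state and initialize the promiscuous
 * QP and steering-entry lists for each steering type.
 */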
static int mlx4_init_steering(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;

	priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL);

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		INIT_LIST_HEAD(&priv->steer[i].high_prios);

static void mlx4_clear_steering(struct mlx4_dev *dev)
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
				list_del(&pqp->list);
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
					list_del(&pqp->list);
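/*
 * Common PCI probe path: enable the device, validate its BARs and
 * request the PCI regions, set the DMA masks, reset the HCA, bring up
 * the command interface, firmware, EQs, MSI-X and steering, then set
 * up the HCA resources, register the device and start port sensing.
 */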
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
		dev_err(&pdev->dev, "Cannot enable PCI device, "

	/*
	 * Check for BARs.  We expect 0: 1MB
	 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		goto err_disable_pdev;
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		goto err_disable_pdev;

	err = pci_request_regions(pdev, DRV_NAME);
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_release_regions;
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
			goto err_release_regions;

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	priv = kzalloc(sizeof *priv, GFP_KERNEL);
		dev_err(&pdev->dev, "Device struct alloc failed, "
		goto err_release_regions;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;

	/*
	 * Now reset the HCA before we touch the PCI capabilities or
	 * attempt a firmware command, since a boot ROM may have left
	 * the HCA in an undefined state.
	 */
	err = mlx4_reset(dev);
		mlx4_err(dev, "Failed to reset HCA, aborting.\n");

	if (mlx4_cmd_init(dev)) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");

	err = mlx4_init_hca(dev);

	err = mlx4_alloc_eq_table(dev);

	priv->msix_ctl.pool_bm = 0;
	spin_lock_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);

	err = mlx4_init_steering(dev);

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);

	err = mlx4_register_device(dev);

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	pci_set_drvdata(pdev, dev);

	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

	mlx4_clear_steering(dev);

	mlx4_free_eq_table(dev);

	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

	mlx4_cmd_cleanup(dev);

err_release_regions:
	pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
static int __devinit mlx4_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *id)
	printk_once(KERN_INFO "%s", mlx4_version);

	return __mlx4_init_one(pdev, id);

static void mlx4_remove_one(struct pci_dev *pdev)
	struct mlx4_dev *dev = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);

		mlx4_stop_sense(dev);
		mlx4_unregister_device(dev);

		for (p = 1; p <= dev->caps.num_ports; p++) {
			mlx4_cleanup_port_info(&priv->port[p]);
			mlx4_CLOSE_PORT(dev, p);

		mlx4_cleanup_counters_table(dev);
		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);
		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_pd_table(dev);

		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		mlx4_clear_steering(dev);
		mlx4_free_eq_table(dev);
		mlx4_close_hca(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);

		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);

int mlx4_restart_one(struct pci_dev *pdev)
	mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, NULL);
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one)
};
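/*
 * Validate the module parameters at load time: log_num_mac,
 * log_num_vlan and log_mtts_per_seg must fall within their supported
 * ranges.
 */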
static int __init mlx4_verify_params(void)
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);

	if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
		pr_warning("mlx4_core: bad num_vlan: %d\n", log_num_vlan);

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
static int __init mlx4_init(void)
	if (mlx4_verify_params())

	mlx4_wq = create_singlethread_workqueue("mlx4");

	ret = pci_register_driver(&mlx4_driver);
	return ret < 0 ? ret : 0;

static void __exit mlx4_cleanup(void)
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);

module_init(mlx4_init);
module_exit(mlx4_cleanup);