/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
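/*
 * Note: pr_fmt() is defined before the includes below so that the pr_*
 * logging macros they pull in prefix every message with the module name.
 */
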
#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#define DRV_VERSION "1.0.0-ko"
#define DRV_DESC "Chelsio T4 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * free up resources.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
	MEMWIN0_APERTURE = 65536,
	MEMWIN0_BASE     = 0x30000,
	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,
	MEMWIN2_APERTURE = 2048,
	MEMWIN2_BASE     = 0x1b800,
};
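/*
 * The apertures above are BAR0-relative windows into adapter memory; they
 * are programmed into the PCIE_MEM_ACCESS registers by setup_memwin()
 * later in this file.
 */
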
enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
	CH_DEVICE(0xa000),  /* PE10K */
	/* ... */
	{ 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of
 * these upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

static unsigned int num_vf[4];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}
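/*
 * Note on set_addr_filters(): addresses are handed to the firmware in
 * batches of up to ARRAY_SIZE(addr) (7) per t4_alloc_mac_filt() call;
 * addresses that cannot be given exact filters are accumulated into the
 * uhash/mhash bits that the final t4_set_addr_hash() call programs.
 */
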
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
			    true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, 0, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true);
	return ret;
}

/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */
	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq = q->adap->sge.egr_map[qid];

		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	return 0;
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	if (v & PFSW) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
	adap->msix_info[1].desc[n] = 0;

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i) {
		snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
			 adap->name, i);
		adap->msix_info[msi_idx++].desc[n] = 0;
	}
	for_each_rdmarxq(&adap->sge, i) {
		snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
			 adap->name, i);
		adap->msix_info[msi_idx++].desc[n] = 0;
	}
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
				  adap->msix_info[msi].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
				  adap->msix_info[msi].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
				  adap->msix_info[msi].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi++;
	}
	return 0;

unwind:
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for all ports since the mapping
 *	table has plenty of entries.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;
	u16 rss[MAX_ETH_QSETS];

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);
		const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++)
			rss[j] = q[j].rspq.abs_id;

		err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size,
					  rss, pi->nqsets);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}
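	/*
	 * At this point msi_idx is 1 for MSI-X (each queue allocated below
	 * bumps it and takes its own vector) or negative otherwise; the
	 * negative encoding carries the intrq's abs_id and, as assumed from
	 * the t4_sge_alloc_rxq() callers below, makes the SGE code route the
	 * queue's interrupt indications to that common interrupt queue.
	 */
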
	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}

/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	u32 vers;
	const struct fw_hdr *hdr;
	const struct firmware *fw;
	struct device *dev = adap->pdev_dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "unable to load firmware image " FW_FNAME
			", error %d\n", ret);
		return ret;
	}

	hdr = (const struct fw_hdr *)fw->data;
	vers = ntohl(hdr->fw_ver);
	if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
		ret = -EINVAL;              /* wrong major version, won't do */
		goto out;
	}

	/*
	 * If the flash FW is unusable or we found something newer, load it.
	 */
	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
	    vers > adap->params.fw_vers) {
		ret = -t4_load_fw(adap, fw->data, fw->size);
		if (!ret)
			dev_info(dev, "firmware upgraded to version %pI4 from "
				 FW_FNAME "\n", &hdr->fw_ver);
	}
out:	release_firmware(fw);
	return ret;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kmalloc(size, GFP_KERNEL);

	if (!p)
		p = vmalloc(size);
	if (p)
		memset(p, 0, size);
	return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	/* ... */
	"TxBroadcastFrames  ",
	"TxMulticastFrames  ",
	/* ... */
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",
	/* ... */
	"RxBroadcastFrames  ",
	"RxMulticastFrames  ",
	/* ... */
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",
	/* ... */
	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	/* ... */
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T4_REGMAP_SIZE (160 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T4_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strcpy(info->driver, KBUILD_MODNAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));

	if (!adapter->params.fw_vers)
		strcpy(info->fw_version, "N/A");
	else
		snprintf(info->fw_version, sizeof(info->fw_version),
			"%u.%u.%u.%u, TP %u.%u.%u.%u",
			FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
};

static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
	}
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return 4 | (ap->params.rev << 10);
}
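/*
 * For example, a T4 (chip version 4) at silicon revision 1 yields
 * 4 | (1 << 10) == 0x404.
 */
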
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int reg_ranges[] = {
		/* pairs of (first, last) register addresses to dump ... */
	};

	int i;
	struct adapter *ap = netdev2adap(dev);

	regs->version = mk_adap_vers(ap);

	memset(buf, 0, T4_REGMAP_SIZE);
	for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, 0, p->tx_chan);
	return 0;
}

static int identify_port(struct net_device *dev, u32 data)
{
	if (data == 0)
		data = 2;	/* default to 2 seconds */

	return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid,
				data * 5);
}

static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
	unsigned int v = 0;

	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) {
		v |= SUPPORTED_TP;
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	else if (type == FW_PORT_TYPE_FIBER)
		v |= SUPPORTED_FIBRE;

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	return v;
}

static unsigned int to_fw_linkcaps(unsigned int caps)
{
	unsigned int v = 0;

	if (caps & ADVERTISED_100baseT_Full)
		v |= FW_PORT_CAP_SPEED_100M;
	if (caps & ADVERTISED_1000baseT_Full)
		v |= FW_PORT_CAP_SPEED_1G;
	if (caps & ADVERTISED_10000baseT_Full)
		v |= FW_PORT_CAP_SPEED_10G;
	return v;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_TWINAX)
		cmd->port = PORT_DA;
	else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;  /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static unsigned int speed_to_caps(int speed)
{
	if (speed == SPEED_100)
		return FW_PORT_CAP_SPEED_100M;
	if (speed == SPEED_1000)
		return FW_PORT_CAP_SPEED_1G;
	if (speed == SPEED_10000)
		return FW_PORT_CAP_SPEED_10G;
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(cmd->speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(cmd->speed);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
		    cmd->speed == SPEED_10000)
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, 0, p->tx_chan, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & FW_PORT_CAP_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_start(p->adapter, 0, p->tx_chan, lc);
	return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_offload & RX_CSO;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	if (data)
		p->rx_offload |= RX_CSO;
	else
		p->rx_offload &= ~RX_CSO;
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
			       const struct sge_rspq *q)
{
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}

/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adap: the adapter
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
	return 0;
}
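/*
 * The encoding above keeps the hold-off timer index in the upper bits of
 * intr_params and the counter-enable flag in bit 0, which is why
 * qtimer_val() recovers the timer index with intr_params >> 1.
 */
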
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
			c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

	c->rx_coalesce_usecs = qtimer_val(adap, rq);
	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
		adap->sge.counter_val[rq->pktcnt_idx] : 0;
	return 0;
}

/*
 * Translate a physical EEPROM address to virtual.  The first 1K is accessed
 * through virtual addresses starting at 31K, the rest is accessed through
 * virtual addresses starting at 0.  This mapping is correct only for PF0.
 */
static int eeprom_ptov(unsigned int phys_addr)
{
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024;
	return -EINVAL;
}
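/*
 * Worked example for eeprom_ptov(): physical address 0 maps to virtual
 * 31K (0x7c00) and physical 1024 maps to virtual 0, i.e. the first 1KB
 * physical block sits at the top of the virtual space while everything
 * from 1K up slides down to start at virtual address 0.
 */
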
/*
 * The next two routines implement eeprom read/write from physical addresses.
 * The physical->virtual translation is correct only for PF0.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	ret = t4_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
	return ret;
}

#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = netdev2adap(dev)->wol;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}

static int set_tso(struct net_device *dev, u32 value)
{
	if (value)
		dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
	return 0;
}

static int set_flags(struct net_device *dev, u32 flags)
{
	if (flags & ~ETH_FLAG_RXHASH)
		return -EOPNOTSUPP;

	if (flags & ETH_FLAG_RXHASH)
		dev->features |= NETIF_F_RXHASH;
	else
		dev->features &= ~NETIF_F_RXHASH;
	return 0;
}

static struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.set_eeprom        = set_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_rx_csum       = get_rx_csum,
	.set_rx_csum       = set_rx_csum,
	.set_tx_csum       = ethtool_op_set_tx_ipv6_csum,
	.set_sg            = ethtool_op_set_sg,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.phys_id           = identify_port,
	.nway_reset        = restart_autoneg,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_wol           = get_wol,
	.set_wol           = set_wol,
	.set_tso           = set_tso,
	.set_flags         = set_flags,
	.flash_device      = set_flash,
};

/*
 * debugfs support
 */

static int mem_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file->f_path.dentry->d_inode->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct adapter *adap = file->private_data - mem;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];

		if (mem == MEM_MC)
			ret = t4_mc_read(adap, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mem, pos, data, NULL);
		if (ret)
			return ret;

		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	count = pos - *ppos;
	*ppos = pos;
	return count;
}

static const struct file_operations mem_debugfs_fops = {
	.owner = THIS_MODULE,
	.open  = mem_open,
	.read  = mem_read,
};

static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
				      unsigned int idx, unsigned int size_mb)
{
	struct dentry *de;

	de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
				 (void *)adap + idx, &mem_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = size_mb << 20;
}

static int __devinit setup_debugfs(struct adapter *adap)
{
	int i;

	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
	if (i & EDRAM0_ENABLE)
		add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
	if (i & EDRAM1_ENABLE)
		add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
	if (i & EXT_MEM_ENABLE)
		add_debugfs_mem(adap, "mc", MEM_MC,
			EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
	if (adap->l2t)
		debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
				    &t4_l2t_fops);
	return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = p - t->atid_tab;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/*
 * Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	stid -= t->stid_base;
	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 2);
	t->stid_tab[stid].data = NULL;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
			     unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		schedule_work(&adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
EXPORT_SYMBOL(cxgb4_queue_tid_release);
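/*
 * The release list is threaded through the tid_tab[] slots themselves:
 * each pending entry's slot holds the pointer to the next pending slot,
 * and the low 2 bits of the list head carry the Tx channel (the slots
 * are pointer-aligned, so those bits are otherwise zero).
 * process_tid_release_list() below masks the channel back off.
 */
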
/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	void *old;
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	old = t->tid_tab[tid];
	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		t->tid_tab[tid] = NULL;
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
	if (old)
		atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int natids = t->natids;

	size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       BITS_TO_LONGS(t->nstids) * sizeof(long);
	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids);
	return 0;
}
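/*
 * Note that tid_init() makes a single t4_alloc_mem() allocation and carves
 * it into the connection TID table, the active-open table, the server
 * table and the server bitmap, in that order, so freeing t->tid_tab
 * releases all four.
 */
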
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
	req->opt0 = cpu_to_be64(TX_CHAN(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
	return t4_mgmt_tx(adap, skb);
}
EXPORT_SYMBOL(cxgb4_create_server);

/**
 *	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
	req->opt0 = cpu_to_be64(TX_CHAN(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
	return t4_mgmt_tx(adap, skb);
}
EXPORT_SYMBOL(cxgb4_create_server6);

/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
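/*
 * Usage sketch for cxgb4_best_mtu() (hypothetical table values): with an
 * MTU table starting { 88, 256, 512, 576, 808, 1024, 1280, 1488, ... },
 * cxgb4_best_mtu(mtus, 1400, &idx) stops at 1280 because the next entry
 * exceeds 1400, returning 1280 with *idx set to its position; a target
 * below mtus[0] yields mtus[0] itself.
 */
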
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

/**
 *	cxgb4_netdev_by_hwid - return the net device of a HW port
 *	@pdev: identifies the adapter
 *	@id: the HW port id
 *
 *	Return the net device associated with the interface with the given HW
 *	id.
 */
struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
{
	const struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap || id >= NCHAN)
		return NULL;
	id = adap->chan_map[id];
	return id < MAX_NPORTS ? adap->port[id] : NULL;
}
EXPORT_SYMBOL(cxgb4_netdev_by_hwid);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
		     HPZ3(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_PMTU_UPDATE:
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	lli.pdev = adap->pdev;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.nrxq = adap->sge.rdmaqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.ofld_rxq;
		lli.nrxq = adap->sge.ofldqsets;
	}
	lli.ntxq = adap->sge.ofldqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.rev;
	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
	lli.fw_vers = adap->params.fw_vers;

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}
}

static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}

static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}

/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);

/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = setup_sge_queues(adap);
		if (err)
			goto out;
		err = setup_rss(adap);
		if (err) {
			t4_free_sge_resources(adap);
			goto out;
		}
		if (adap->flags & USING_MSIX)
			name_msix_vecs(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	if (adap->flags & USING_MSIX) {
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	notify_ulds(adap, CXGB4_STATE_UP);
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
	goto out;
}

static void cxgb_down(struct adapter *adapter)
{
	t4_intr_disable(adapter);
	cancel_work_sync(&adapter->tid_release_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	if (adapter->flags & USING_MSIX) {
		free_msix_queue_irqs(adapter);
		free_irq(adapter->msix_info[0].vec, adapter);
	} else
		free_irq(adapter->pdev->irq, adapter);
	quiesce_rx(adapter);
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
		return err;

	dev->real_num_tx_queues = pi->nqsets;
	set_bit(pi->tx_chan, &adapter->open_device_map);
	link_start(dev);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	ret = t4_enable_vi(adapter, 0, pi->viid, false, false);

	clear_bit(pi->tx_chan, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;
	struct net_device_stats *ns = &dev->stats;

	spin_lock(&adapter->stats_lock);
	t4_get_port_stats(adapter, p->tx_chan, &stats);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = stats.rx_fcs_err;
	ns->rx_frame_errors = stats.rx_symbol_err;
	ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
			     stats.rx_ovflow2 + stats.rx_ovflow3 +
			     stats.rx_trunc0 + stats.rx_trunc1 +
			     stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, 0, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, 0, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
		return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1,
			    true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt,
			    addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);

	pi->vlan_grp = grp;
	t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_get_stats        = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
};
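/*
 * Fatal error handling: disable the SGE globally and mask all interrupts so
 * the adapter stops DMAing before the failure is reported.
 */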
void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
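/*
 * Position the three fixed PCIe memory windows inside BAR0.  The WINDOW
 * field encodes log2(aperture size) - 10, hence the ilog2() arithmetic
 * below.
 */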
static void setup_memwin(struct adapter *adap)
{
	u32 bar0;

	bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
		     (bar0 + MEMWIN0_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
		     (bar0 + MEMWIN1_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
		     (bar0 + MEMWIN2_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd c;

	ret = t4_check_fw_version(adap);
	if (ret == -EINVAL || ret > 0) {
		if (upgrade_fw(adap) >= 0)             /* recache FW version */
			ret = t4_check_fw_version(adap);
	}
	if (ret < 0)
		return ret;

	/* contact FW, request master */
	ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}

	/* reset device */
	ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST);
	if (ret < 0)
		goto bye;

	/* get device capabilities */
	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_READ);
	c.retval_len16 = htonl(FW_LEN16(c));
	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
	if (ret < 0)
		goto bye;

	/* select capabilities we'll be using */
	if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
		goto bye;
	}
	c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL);
	if (ret < 0)
		goto bye;

	ret = t4_config_glbl_rss(adap, 0,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
	if (ret < 0)
		goto bye;

	ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
			  FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (ret < 0)
		goto bye;

	for (v = 0; v < SGE_NTIMERS - 1; v++)
		adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
	adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
	adap->sge.counter_val[0] = 1;
	for (v = 1; v < SGE_NCOUNTERS; v++)
		adap->sge.counter_val[v] = min(intr_cnt[v - 1],
					       THRESHOLD_3_MASK);

	/* get basic stuff going */
	ret = t4_early_init(adap, 0);
	if (ret < 0)
		goto bye;

#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))

	params[0] = FW_PARAM_DEV(PORTVEC);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	ret = t4_query_params(adap, 0, 0, 0, 5, params, val);
	if (ret < 0)
		goto bye;
	port_vec = val[0];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;

	if (c.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];
		adap->params.offload = 1;
	}
	if (c.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;
	}
	if (c.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;
	adap->flags |= FW_OK;

	/* These are finalized by FW initialization, load their values now */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
	v = t4_read_reg(adap, TP_PIO_DATA);
	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
	setup_memwin(adap);
	return 0;

	/*
	 * If a command timed out or failed with EIO, FW does not operate
	 * within its spec or something catastrophic happened to HW/FW;
	 * stop issuing commands.
	 */
bye:	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, 0);
	return ret;
}
static inline bool is_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
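/*
 * Fill in default interrupt-coalescing parameters for a response queue: a
 * holdoff timer index and, when pkt_cnt_idx names a valid counter, a packet
 * count threshold that gates the timer.
 */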
static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}
/*
 * Perform default configuration of DMA queues depending on the number and
 * type of ports we found and the number of available CPUs.  Most settings can
 * be modified by the admin prior to actual use.
 */
static void __devinit cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > num_online_cpus())
		q10g = num_online_cpus();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to
		 * 1G, otherwise we divide all available queues amongst the
		 * channels, capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void __devinit reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2
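/*
 * Negotiate MSI-X vectors: ask for one per queue plus the extras, and as long
 * as pci_enable_msix() reports that only a smaller number is available (a
 * positive return value), retry with that number until it drops below our
 * minimum.
 */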
static int __devinit enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, err, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;  /* round down */
		}
		for (i = 0; i < want; ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(adap->pdev_dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
static void __devinit print_port_info(struct adapter *adap)
{
	static const char *base[] = {
		"R", "KX4", "T", "KX", "T", "KR", "CX4"
	};

	int i;
	char buf[80];

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);
		char *bufp = buf;

		if (!test_bit(i, &adap->registered_device_map))
			continue;

		if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
			bufp += sprintf(bufp, "100/");
		if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
			bufp += sprintf(bufp, "1000/");
		if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
			bufp += sprintf(bufp, "10G/");
		if (bufp != buf)
			--bufp;
		sprintf(bufp, "BASE-%s", base[pi->port_type]);

		netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n",
			    adap->params.vpd.id, adap->params.rev,
			    buf, is_offload(adap) ? "R" : "",
			    adap->params.pci.width,
			    (adap->flags & USING_MSIX) ? " MSI-X" :
			    (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name)
			netdev_info(dev, "S/N: %s, E/C: %s\n",
				    adap->params.vpd.sn, adap->params.vpd.ec);
	}
}
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
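/*
 * PCI probe: PF 0 owns the whole device; other functions skip straight to the
 * optional SR-IOV setup at the bottom.
 */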
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int func, i, err;
	struct port_info *pi;
	unsigned int highdma = 0;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through PF 0 */
	func = PCI_FUNC(pdev->devfn);
	if (func > 0)
		goto sriov;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = NETIF_F_HIGHDMA;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar;
	err = adap_init0(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->rx_offload = RX_CSO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;

		netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, 0, 0, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err) {
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
			continue;
		}
		/*
		 * Change the name we use for messages to the name of
		 * the first successfully registered interface.
		 */
		if (!adapter->registered_device_map)
			adapter->name = adapter->port[i]->name;

		__set_bit(i, &adapter->registered_device_map);
		adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	if (is_offload(adapter))
		attach_ulds(adapter);

	print_port_info(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	t4_free_mem(adapter->tids.tid_tab);
	t4_free_mem(adapter->l2t);
	for_each_port(adapter, i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, 0);
 out_unmap_bar:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
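/*
 * Teardown mirrors init_one in reverse: ULDs first, then net devices,
 * debugfs, SGE resources, interrupts, and finally the PCI state.
 */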
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		t4_sge_stop(adapter);
		t4_free_sge_resources(adapter);
		t4_free_mem(adapter->l2t);
		t4_free_mem(adapter->tids.tid_tab);
		disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		if (adapter->flags & FW_OK)
			t4_fw_bye(adapter, 0);
		iounmap(adapter->regs);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else if (PCI_FUNC(pdev->devfn) > 0)
		pci_release_regions(pdev);
}
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = __devexit_p(remove_one),
};
static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warning("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
	return ret;
}
static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);