2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
50 #include "cxgb3_ioctl.h"
52 #include "cxgb3_offload.h"
55 #include "cxgb3_ctl_defs.h"
57 #include "firmware_exports.h"
	/* Bounds for the user-configurable SGE queue sizes.
	 * NOTE(review): the "enum {" opener and some MIN_* members are not
	 * visible in this chunk — confirm against the full file. */
	MAX_TXQ_ENTRIES = 16384,	/* largest Tx descriptor ring */
	MAX_CTRL_TXQ_ENTRIES = 1024,	/* largest control Tx ring */
	MAX_RSPQ_ENTRIES = 16384,	/* largest response queue */
	MAX_RX_BUFFERS = 16384,		/* largest Rx free list */
	MAX_RX_JUMBO_BUFFERS = 16384,	/* largest jumbo free list */
	MIN_CTRL_TXQ_ENTRIES = 4,	/* smallest control Tx ring */
	MIN_RSPQ_ENTRIES = 32,		/* smallest response queue */
/* Bitmask covering all possible ports of an adapter. */
#define PORT_MASK ((1 << MAX_NPORTS) - 1)

/* Default netif_msg message-level bitmap for new interfaces. */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Magic cookie identifying a valid cxgb3 EEPROM image. */
#define EEPROM_MAGIC 0x38E2F10C

/* Shorthand for a PCI device-table entry; idx selects adapter-type info. */
#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1), /* T302E */
85 CH_DEVICE(0x22, 2), /* T310E */
86 CH_DEVICE(0x23, 3), /* T320X */
87 CH_DEVICE(0x24, 1), /* T302X */
88 CH_DEVICE(0x25, 3), /* T320E */
89 CH_DEVICE(0x26, 2), /* T310X */
90 CH_DEVICE(0x30, 2), /* T3B10 */
91 CH_DEVICE(0x31, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1), /* T3B02 */
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

/* Default netif_msg bitmap applied to each port; tunable via sysfs. */
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
118 module_param(msi, int, 0644);
119 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;	/* driver-private workqueue */
142 * link_report - show link status and link speed/duplex
143 * @p: the port whose settings are to be reported
145 * Shows the link status, speed, and duplex of a port.
147 static void link_report(struct net_device *dev)
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
155 switch (p->link_config.speed) {
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
175 * @port_id: the port index whose limk status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
185 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
188 struct net_device *dev = adapter->port[port_id];
189 struct port_info *pi = netdev_priv(dev);
190 struct cmac *mac = &pi->mac;
192 /* Skip changes from disabled ports. */
193 if (!netif_running(dev))
196 if (link_stat != netif_carrier_ok(dev)) {
198 t3_mac_enable(mac, MAC_DIRECTION_RX);
199 netif_carrier_on(dev);
201 netif_carrier_off(dev);
202 pi->phy.ops->power_down(&pi->phy, 1);
203 t3_mac_disable(mac, MAC_DIRECTION_RX);
204 t3_link_start(&pi->phy, mac, &pi->link_config);
212 * t3_os_phymod_changed - handle PHY module changes
213 * @phy: the PHY reporting the module change
214 * @mod_type: new module type
216 * This is the OS-dependent handler for PHY module changes. It is
217 * invoked when a PHY module is removed or inserted for any OS-specific
220 void t3_os_phymod_changed(struct adapter *adap, int port_id)
222 static const char *mod_str[] = {
223 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
226 const struct net_device *dev = adap->port[port_id];
227 const struct port_info *pi = netdev_priv(dev);
229 if (pi->phy.modtype == phy_modtype_none)
230 printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
232 printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
233 mod_str[pi->phy.modtype]);
236 static void cxgb_set_rxmode(struct net_device *dev)
238 struct t3_rx_mode rm;
239 struct port_info *pi = netdev_priv(dev);
241 init_rx_mode(&rm, dev, dev->mc_list);
242 t3_mac_set_rx_mode(&pi->mac, &rm);
246 * link_start - enable a port
247 * @dev: the device to enable
249 * Performs the MAC and PHY actions needed to enable a port.
251 static void link_start(struct net_device *dev)
253 struct t3_rx_mode rm;
254 struct port_info *pi = netdev_priv(dev);
255 struct cmac *mac = &pi->mac;
257 init_rx_mode(&rm, dev, dev->mc_list);
259 t3_mac_set_mtu(mac, dev->mtu);
260 t3_mac_set_address(mac, 0, dev->dev_addr);
261 t3_mac_set_rx_mode(mac, &rm);
262 t3_link_start(&pi->phy, mac, &pi->link_config);
263 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
266 static inline void cxgb_disable_msi(struct adapter *adapter)
268 if (adapter->flags & USING_MSIX) {
269 pci_disable_msix(adapter->pdev);
270 adapter->flags &= ~USING_MSIX;
271 } else if (adapter->flags & USING_MSI) {
272 pci_disable_msi(adapter->pdev);
273 adapter->flags &= ~USING_MSI;
278 * Interrupt handler for asynchronous events used with MSI-X.
280 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
282 t3_slow_intr_handler(cookie);
287 * Name the MSI-X interrupts.
289 static void name_msix_vecs(struct adapter *adap)
291 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
293 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
294 adap->msix_info[0].desc[n] = 0;
296 for_each_port(adap, j) {
297 struct net_device *d = adap->port[j];
298 const struct port_info *pi = netdev_priv(d);
300 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
301 snprintf(adap->msix_info[msi_idx].desc, n,
302 "%s-%d", d->name, pi->first_qset + i);
303 adap->msix_info[msi_idx].desc[n] = 0;
308 static int request_msix_data_irqs(struct adapter *adap)
310 int i, j, err, qidx = 0;
312 for_each_port(adap, i) {
313 int nqsets = adap2pinfo(adap, i)->nqsets;
315 for (j = 0; j < nqsets; ++j) {
316 err = request_irq(adap->msix_info[qidx + 1].vec,
317 t3_intr_handler(adap,
320 adap->msix_info[qidx + 1].desc,
321 &adap->sge.qs[qidx]);
324 free_irq(adap->msix_info[qidx + 1].vec,
325 &adap->sge.qs[qidx]);
334 static void free_irq_resources(struct adapter *adapter)
336 if (adapter->flags & USING_MSIX) {
339 free_irq(adapter->msix_info[0].vec, adapter);
340 for_each_port(adapter, i)
341 n += adap2pinfo(adapter, i)->nqsets;
343 for (i = 0; i < n; ++i)
344 free_irq(adapter->msix_info[i + 1].vec,
345 &adapter->sge.qs[i]);
347 free_irq(adapter->pdev->irq, adapter);
350 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
355 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
363 static int init_tp_parity(struct adapter *adap)
367 struct cpl_set_tcb_field *greq;
368 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
370 t3_tp_set_offload_mode(adap, 1);
372 for (i = 0; i < 16; i++) {
373 struct cpl_smt_write_req *req;
375 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
376 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
377 memset(req, 0, sizeof(*req));
378 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
379 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
381 t3_mgmt_tx(adap, skb);
384 for (i = 0; i < 2048; i++) {
385 struct cpl_l2t_write_req *req;
387 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
388 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
389 memset(req, 0, sizeof(*req));
390 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
391 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
392 req->params = htonl(V_L2T_W_IDX(i));
393 t3_mgmt_tx(adap, skb);
396 for (i = 0; i < 2048; i++) {
397 struct cpl_rte_write_req *req;
399 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
400 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
401 memset(req, 0, sizeof(*req));
402 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
403 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
404 req->l2t_idx = htonl(V_L2T_W_IDX(i));
405 t3_mgmt_tx(adap, skb);
408 skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
409 greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
410 memset(greq, 0, sizeof(*greq));
411 greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
412 OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
413 greq->mask = cpu_to_be64(1);
414 t3_mgmt_tx(adap, skb);
416 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
417 t3_tp_set_offload_mode(adap, 0);
422 * setup_rss - configure RSS
425 * Sets up RSS to distribute packets to multiple receive queues. We
426 * configure the RSS CPU lookup table to distribute to the number of HW
427 * receive queues, and the response queue lookup table to narrow that
428 * down to the response queues actually configured for each port.
429 * We always configure the RSS mapping for two ports since the mapping
430 * table has plenty of entries.
432 static void setup_rss(struct adapter *adap)
435 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
436 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
437 u8 cpus[SGE_QSETS + 1];
438 u16 rspq_map[RSS_TABLE_SIZE];
440 for (i = 0; i < SGE_QSETS; ++i)
442 cpus[SGE_QSETS] = 0xff; /* terminator */
444 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
445 rspq_map[i] = i % nq0;
446 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
449 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
450 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
451 V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
454 static void init_napi(struct adapter *adap)
458 for (i = 0; i < SGE_QSETS; i++) {
459 struct sge_qset *qs = &adap->sge.qs[i];
462 netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
467 * netif_napi_add() can be called only once per napi_struct because it
468 * adds each new napi_struct to a list. Be careful not to call it a
469 * second time, e.g., during EEH recovery, by making a note of it.
471 adap->flags |= NAPI_INIT;
475 * Wait until all NAPI handlers are descheduled. This includes the handlers of
476 * both netdevices representing interfaces and the dummy ones for the extra
479 static void quiesce_rx(struct adapter *adap)
483 for (i = 0; i < SGE_QSETS; i++)
484 if (adap->sge.qs[i].adap)
485 napi_disable(&adap->sge.qs[i].napi);
488 static void enable_all_napi(struct adapter *adap)
491 for (i = 0; i < SGE_QSETS; i++)
492 if (adap->sge.qs[i].adap)
493 napi_enable(&adap->sge.qs[i].napi);
497 * set_qset_lro - Turn a queue set's LRO capability on and off
498 * @dev: the device the qset is attached to
499 * @qset_idx: the queue set index
500 * @val: the LRO switch
502 * Sets LRO on or off for a particular queue set.
503 * the device's features flag is updated to reflect the LRO
504 * capability when all queues belonging to the device are
507 static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
509 struct port_info *pi = netdev_priv(dev);
510 struct adapter *adapter = pi->adapter;
513 adapter->params.sge.qset[qset_idx].lro = !!val;
514 adapter->sge.qs[qset_idx].lro_enabled = !!val;
516 /* let ethtool report LRO on only if all queues are LRO enabled */
517 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
518 lro_on &= adapter->params.sge.qset[i].lro;
521 dev->features |= NETIF_F_LRO;
523 dev->features &= ~NETIF_F_LRO;
527 * setup_sge_qsets - configure SGE Tx/Rx/response queues
530 * Determines how many sets of SGE queues to use and initializes them.
531 * We support multiple queue sets per port if we have MSI-X, otherwise
532 * just one queue set per port.
534 static int setup_sge_qsets(struct adapter *adap)
536 int i, j, err, irq_idx = 0, qset_idx = 0;
537 unsigned int ntxq = SGE_TXQ_PER_SET;
539 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
542 for_each_port(adap, i) {
543 struct net_device *dev = adap->port[i];
544 struct port_info *pi = netdev_priv(dev);
546 pi->qs = &adap->sge.qs[pi->first_qset];
547 for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
549 set_qset_lro(dev, qset_idx, pi->rx_csum_offload);
550 err = t3_sge_alloc_qset(adap, qset_idx, 1,
551 (adap->flags & USING_MSIX) ? qset_idx + 1 :
553 &adap->params.sge.qset[qset_idx], ntxq, dev,
554 netdev_get_tx_queue(dev, j));
556 t3_stop_sge_timers(adap);
557 t3_free_sge_resources(adap);
566 static ssize_t attr_show(struct device *d, char *buf,
567 ssize_t(*format) (struct net_device *, char *))
571 /* Synchronize with ioctls that may shut down the device */
573 len = (*format) (to_net_dev(d), buf);
578 static ssize_t attr_store(struct device *d,
579 const char *buf, size_t len,
580 ssize_t(*set) (struct net_device *, unsigned int),
581 unsigned int min_val, unsigned int max_val)
587 if (!capable(CAP_NET_ADMIN))
590 val = simple_strtoul(buf, &endp, 0);
591 if (endp == buf || val < min_val || val > max_val)
595 ret = (*set) (to_net_dev(d), val);
/* Generate a format_<name>() that prints val_expr and a sysfs show_<name>()
 * wrapper that runs it via attr_show(). */
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
615 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
617 struct port_info *pi = netdev_priv(dev);
618 struct adapter *adap = pi->adapter;
619 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
621 if (adap->flags & FULL_INIT_DONE)
623 if (val && adap->params.rev == 0)
625 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
628 adap->params.mc5.nfilters = val;
632 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
633 const char *buf, size_t len)
635 return attr_store(d, buf, len, set_nfilters, 0, ~0);
638 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
640 struct port_info *pi = netdev_priv(dev);
641 struct adapter *adap = pi->adapter;
643 if (adap->flags & FULL_INIT_DONE)
645 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
648 adap->params.mc5.nservers = val;
652 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
653 const char *buf, size_t len)
655 return attr_store(d, buf, len, set_nservers, 0, ~0);
/* Declare a read-only sysfs attribute backed by CXGB3_SHOW(). */
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

/* Declare a read-write sysfs attribute with the given store method. */
#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
670 static struct attribute *cxgb3_attrs[] = {
671 &dev_attr_cam_size.attr,
672 &dev_attr_nfilters.attr,
673 &dev_attr_nservers.attr,
677 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
679 static ssize_t tm_attr_show(struct device *d,
680 char *buf, int sched)
682 struct port_info *pi = netdev_priv(to_net_dev(d));
683 struct adapter *adap = pi->adapter;
684 unsigned int v, addr, bpt, cpt;
687 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
689 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
690 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
693 bpt = (v >> 8) & 0xff;
696 len = sprintf(buf, "disabled\n");
698 v = (adap->params.vpd.cclk * 1000) / cpt;
699 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
705 static ssize_t tm_attr_store(struct device *d,
706 const char *buf, size_t len, int sched)
708 struct port_info *pi = netdev_priv(to_net_dev(d));
709 struct adapter *adap = pi->adapter;
714 if (!capable(CAP_NET_ADMIN))
717 val = simple_strtoul(buf, &endp, 0);
718 if (endp == buf || val > 10000000)
722 ret = t3_config_sched(adap, val, sched);
/* Declare the show/store pair and DEVICE_ATTR for one Tx scheduler. */
#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
751 static struct attribute *offload_attrs[] = {
752 &dev_attr_sched0.attr,
753 &dev_attr_sched1.attr,
754 &dev_attr_sched2.attr,
755 &dev_attr_sched3.attr,
756 &dev_attr_sched4.attr,
757 &dev_attr_sched5.attr,
758 &dev_attr_sched6.attr,
759 &dev_attr_sched7.attr,
763 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}
779 static int write_smt_entry(struct adapter *adapter, int idx)
781 struct cpl_smt_write_req *req;
782 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
787 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
788 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
789 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
790 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
792 memset(req->src_mac1, 0, sizeof(req->src_mac1));
793 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
795 offload_tx(&adapter->tdev, skb);
799 static int init_smt(struct adapter *adapter)
803 for_each_port(adapter, i)
804 write_smt_entry(adapter, i);
808 static void init_port_mtus(struct adapter *adapter)
810 unsigned int mtus = adapter->port[0]->mtu;
812 if (adapter->port[1])
813 mtus |= adapter->port[1]->mtu << 16;
814 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
817 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
821 struct mngt_pktsched_wr *req;
824 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
825 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
826 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
827 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
833 ret = t3_mgmt_tx(adap, skb);
838 static int bind_qsets(struct adapter *adap)
842 for_each_port(adap, i) {
843 const struct port_info *pi = adap2pinfo(adap, i);
845 for (j = 0; j < pi->nqsets; ++j) {
846 int ret = send_pktsched_cmd(adap, 1,
847 pi->first_qset + j, -1,
/* printf templates for the firmware and TP protocol-SRAM image file names;
 * filled in with revision character and major/minor/micro versions. */
#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
860 static int upgrade_fw(struct adapter *adap)
864 const struct firmware *fw;
865 struct device *dev = &adap->pdev->dev;
867 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
868 FW_VERSION_MINOR, FW_VERSION_MICRO);
869 ret = request_firmware(&fw, buf, dev);
871 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
875 ret = t3_load_fw(adap, fw->data, fw->size);
876 release_firmware(fw);
879 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
880 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
882 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
883 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
888 static inline char t3rev2char(struct adapter *adapter)
892 switch(adapter->params.rev) {
904 static int update_tpsram(struct adapter *adap)
906 const struct firmware *tpsram;
908 struct device *dev = &adap->pdev->dev;
912 rev = t3rev2char(adap);
916 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
917 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
919 ret = request_firmware(&tpsram, buf, dev);
921 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
926 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
930 ret = t3_set_proto_sram(adap, tpsram->data);
933 "successful update of protocol engine "
935 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
937 dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
938 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
940 dev_err(dev, "loading protocol SRAM failed\n");
943 release_firmware(tpsram);
949 * cxgb_up - enable the adapter
950 * @adapter: adapter being enabled
952 * Called when the first port is enabled, this function performs the
953 * actions necessary to make an adapter operational, such as completing
954 * the initialization of HW modules, and enabling interrupts.
956 * Must be called with the rtnl lock held.
958 static int cxgb_up(struct adapter *adap)
963 if (!(adap->flags & FULL_INIT_DONE)) {
964 err = t3_check_fw_version(adap, &must_load);
965 if (err == -EINVAL) {
966 err = upgrade_fw(adap);
967 if (err && must_load)
971 err = t3_check_tpsram_version(adap, &must_load);
972 if (err == -EINVAL) {
973 err = update_tpsram(adap);
974 if (err && must_load)
979 * Clear interrupts now to catch errors if t3_init_hw fails.
980 * We clear them again later as initialization may trigger
981 * conditions that can interrupt.
985 err = t3_init_hw(adap, 0);
989 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
990 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
992 err = setup_sge_qsets(adap);
997 if (!(adap->flags & NAPI_INIT))
999 adap->flags |= FULL_INIT_DONE;
1002 t3_intr_clear(adap);
1004 if (adap->flags & USING_MSIX) {
1005 name_msix_vecs(adap);
1006 err = request_irq(adap->msix_info[0].vec,
1007 t3_async_intr_handler, 0,
1008 adap->msix_info[0].desc, adap);
1012 err = request_msix_data_irqs(adap);
1014 free_irq(adap->msix_info[0].vec, adap);
1017 } else if ((err = request_irq(adap->pdev->irq,
1018 t3_intr_handler(adap,
1019 adap->sge.qs[0].rspq.
1021 (adap->flags & USING_MSI) ?
1026 enable_all_napi(adap);
1028 t3_intr_enable(adap);
1030 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1031 is_offload(adap) && init_tp_parity(adap) == 0)
1032 adap->flags |= TP_PARITY_INIT;
1034 if (adap->flags & TP_PARITY_INIT) {
1035 t3_write_reg(adap, A_TP_INT_CAUSE,
1036 F_CMCACHEPERR | F_ARPLUTPERR);
1037 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1040 if (!(adap->flags & QUEUES_BOUND)) {
1041 err = bind_qsets(adap);
1043 CH_ERR(adap, "failed to bind qsets, err %d\n", err);
1044 t3_intr_disable(adap);
1045 free_irq_resources(adap);
1048 adap->flags |= QUEUES_BOUND;
1054 CH_ERR(adap, "request_irq failed, err %d\n", err);
1059 * Release resources when all the ports and offloading have been stopped.
1061 static void cxgb_down(struct adapter *adapter)
1063 t3_sge_stop(adapter);
1064 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
1065 t3_intr_disable(adapter);
1066 spin_unlock_irq(&adapter->work_lock);
1068 free_irq_resources(adapter);
1069 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
1070 quiesce_rx(adapter);
1073 static void schedule_chk_task(struct adapter *adap)
1077 timeo = adap->params.linkpoll_period ?
1078 (HZ * adap->params.linkpoll_period) / 10 :
1079 adap->params.stats_update_period * HZ;
1081 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1084 static int offload_open(struct net_device *dev)
1086 struct port_info *pi = netdev_priv(dev);
1087 struct adapter *adapter = pi->adapter;
1088 struct t3cdev *tdev = dev2t3cdev(dev);
1089 int adap_up = adapter->open_device_map & PORT_MASK;
1092 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1095 if (!adap_up && (err = cxgb_up(adapter)) < 0)
1098 t3_tp_set_offload_mode(adapter, 1);
1099 tdev->lldev = adapter->port[0];
1100 err = cxgb3_offload_activate(adapter);
1104 init_port_mtus(adapter);
1105 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1106 adapter->params.b_wnd,
1107 adapter->params.rev == 0 ?
1108 adapter->port[0]->mtu : 0xffff);
1111 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1112 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1114 /* Call back all registered clients */
1115 cxgb3_add_clients(tdev);
1118 /* restore them in case the offload module has changed them */
1120 t3_tp_set_offload_mode(adapter, 0);
1121 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1122 cxgb3_set_dummy_ops(tdev);
1127 static int offload_close(struct t3cdev *tdev)
1129 struct adapter *adapter = tdev2adap(tdev);
1131 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1134 /* Call back all registered clients */
1135 cxgb3_remove_clients(tdev);
1137 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1140 cxgb3_set_dummy_ops(tdev);
1141 t3_tp_set_offload_mode(adapter, 0);
1142 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1144 if (!adapter->open_device_map)
1147 cxgb3_offload_deactivate(adapter);
1151 static int cxgb_open(struct net_device *dev)
1153 struct port_info *pi = netdev_priv(dev);
1154 struct adapter *adapter = pi->adapter;
1155 int other_ports = adapter->open_device_map & PORT_MASK;
1158 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1161 set_bit(pi->port_id, &adapter->open_device_map);
1162 if (is_offload(adapter) && !ofld_disable) {
1163 err = offload_open(dev);
1166 "Could not initialize offload capabilities\n");
1169 dev->real_num_tx_queues = pi->nqsets;
1171 t3_port_intr_enable(adapter, pi->port_id);
1172 netif_tx_start_all_queues(dev);
1174 schedule_chk_task(adapter);
1179 static int cxgb_close(struct net_device *dev)
1181 struct port_info *pi = netdev_priv(dev);
1182 struct adapter *adapter = pi->adapter;
1184 t3_port_intr_disable(adapter, pi->port_id);
1185 netif_tx_stop_all_queues(dev);
1186 pi->phy.ops->power_down(&pi->phy, 1);
1187 netif_carrier_off(dev);
1188 t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1190 spin_lock_irq(&adapter->work_lock); /* sync with update task */
1191 clear_bit(pi->port_id, &adapter->open_device_map);
1192 spin_unlock_irq(&adapter->work_lock);
1194 if (!(adapter->open_device_map & PORT_MASK))
1195 cancel_rearming_delayed_workqueue(cxgb3_wq,
1196 &adapter->adap_check_task);
1198 if (!adapter->open_device_map)
1204 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1206 struct port_info *pi = netdev_priv(dev);
1207 struct adapter *adapter = pi->adapter;
1208 struct net_device_stats *ns = &pi->netstats;
1209 const struct mac_stats *pstats;
1211 spin_lock(&adapter->stats_lock);
1212 pstats = t3_mac_update_stats(&pi->mac);
1213 spin_unlock(&adapter->stats_lock);
1215 ns->tx_bytes = pstats->tx_octets;
1216 ns->tx_packets = pstats->tx_frames;
1217 ns->rx_bytes = pstats->rx_octets;
1218 ns->rx_packets = pstats->rx_frames;
1219 ns->multicast = pstats->rx_mcast_frames;
1221 ns->tx_errors = pstats->tx_underrun;
1222 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1223 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1224 pstats->rx_fifo_ovfl;
1226 /* detailed rx_errors */
1227 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1228 ns->rx_over_errors = 0;
1229 ns->rx_crc_errors = pstats->rx_fcs_errs;
1230 ns->rx_frame_errors = pstats->rx_symbol_errs;
1231 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1232 ns->rx_missed_errors = pstats->rx_cong_drops;
1234 /* detailed tx_errors */
1235 ns->tx_aborted_errors = 0;
1236 ns->tx_carrier_errors = 0;
1237 ns->tx_fifo_errors = pstats->tx_underrun;
1238 ns->tx_heartbeat_errors = 0;
1239 ns->tx_window_errors = 0;
1243 static u32 get_msglevel(struct net_device *dev)
1245 struct port_info *pi = netdev_priv(dev);
1246 struct adapter *adapter = pi->adapter;
1248 return adapter->msg_enable;
1251 static void set_msglevel(struct net_device *dev, u32 val)
1253 struct port_info *pi = netdev_priv(dev);
1254 struct adapter *adapter = pi->adapter;
1256 adapter->msg_enable = val;
/* Labels for the per-port ethtool statistics; the order must match the
 * sequence of values written by get_stats().
 * NOTE(review): many entries (octet/frame/pause/error counters and the
 * closing brace) are not visible in this chunk — confirm against the full
 * file before relying on indices. */
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",
	"CheckTXEnToggled ",
1312 static int get_sset_count(struct net_device *dev, int sset)
1316 return ARRAY_SIZE(stats_strings);
1322 #define T3_REGMAP_SIZE (3 * 1024)
1324 static int get_regs_len(struct net_device *dev)
1326 return T3_REGMAP_SIZE;
1329 static int get_eeprom_len(struct net_device *dev)
/* ethtool .get_drvinfo: fill in driver name/version, bus id, and the
 * firmware + TP microcode versions read from the adapter.
 */
1334 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1336 struct port_info *pi = netdev_priv(dev);
1337 struct adapter *adapter = pi->adapter;
/* stats_lock serializes the version reads against other slow-path
 * register access (the same lock guards t3_mac_update_stats elsewhere).
 */
1341 spin_lock(&adapter->stats_lock);
1342 t3_get_fw_version(adapter, &fw_vers);
1343 t3_get_tp_version(adapter, &tp_vers);
1344 spin_unlock(&adapter->stats_lock);
1346 strcpy(info->driver, DRV_NAME);
1347 strcpy(info->version, DRV_VERSION);
1348 strcpy(info->bus_info, pci_name(adapter->pdev));
/* "N/A" default; overwritten below when a firmware version was read.
 * NOTE(review): the conditional guarding the snprintf is elided here.
 */
1350 strcpy(info->fw_version, "N/A");
1352 snprintf(info->fw_version, sizeof(info->fw_version),
1353 "%s %u.%u.%u TP %u.%u.%u",
1354 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1355 G_FW_VERSION_MAJOR(fw_vers),
1356 G_FW_VERSION_MINOR(fw_vers),
1357 G_FW_VERSION_MICRO(fw_vers),
1358 G_TP_VERSION_MAJOR(tp_vers),
1359 G_TP_VERSION_MINOR(tp_vers),
1360 G_TP_VERSION_MICRO(tp_vers));
/* ethtool .get_strings: copy out the statistics name table. */
1364 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1366 if (stringset == ETH_SS_STATS)
1367 memcpy(data, stats_strings, sizeof(stats_strings));
/* Sum the per-qset SGE counter @idx over all queue sets owned by port @p. */
1370 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1371 struct port_info *p, int idx)
1374 unsigned long tot = 0;
1376 for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1377 tot += adapter->sge.qs[i].port_stats[idx];
/* ethtool .get_ethtool_stats: emit MAC and SGE counters in the exact
 * order of stats_strings[].  Changing the order here without changing
 * the string table breaks userspace labeling.
 */
1381 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1384 struct port_info *pi = netdev_priv(dev);
1385 struct adapter *adapter = pi->adapter;
1386 const struct mac_stats *s;
/* stats_lock protects the clear-on-read MAC statistics accumulation. */
1388 spin_lock(&adapter->stats_lock);
1389 s = t3_mac_update_stats(&pi->mac);
1390 spin_unlock(&adapter->stats_lock);
/* Tx MAC counters */
1392 *data++ = s->tx_octets;
1393 *data++ = s->tx_frames;
1394 *data++ = s->tx_mcast_frames;
1395 *data++ = s->tx_bcast_frames;
1396 *data++ = s->tx_pause;
1397 *data++ = s->tx_underrun;
1398 *data++ = s->tx_fifo_urun;
/* Tx size histogram */
1400 *data++ = s->tx_frames_64;
1401 *data++ = s->tx_frames_65_127;
1402 *data++ = s->tx_frames_128_255;
1403 *data++ = s->tx_frames_256_511;
1404 *data++ = s->tx_frames_512_1023;
1405 *data++ = s->tx_frames_1024_1518;
1406 *data++ = s->tx_frames_1519_max;
/* Rx MAC counters */
1408 *data++ = s->rx_octets;
1409 *data++ = s->rx_frames;
1410 *data++ = s->rx_mcast_frames;
1411 *data++ = s->rx_bcast_frames;
1412 *data++ = s->rx_pause;
1413 *data++ = s->rx_fcs_errs;
1414 *data++ = s->rx_symbol_errs;
1415 *data++ = s->rx_short;
1416 *data++ = s->rx_jabber;
1417 *data++ = s->rx_too_long;
1418 *data++ = s->rx_fifo_ovfl;
/* Rx size histogram */
1420 *data++ = s->rx_frames_64;
1421 *data++ = s->rx_frames_65_127;
1422 *data++ = s->rx_frames_128_255;
1423 *data++ = s->rx_frames_256_511;
1424 *data++ = s->rx_frames_512_1023;
1425 *data++ = s->rx_frames_1024_1518;
1426 *data++ = s->rx_frames_1519_max;
1428 *data++ = pi->phy.fifo_errors;
/* Per-port SGE software counters, summed over the port's qsets. */
1430 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1431 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1432 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1433 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1434 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1435 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
1436 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
1437 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
1438 *data++ = s->rx_cong_drops;
/* T3B2 MAC watchdog counters (see check_t3b2_mac below). */
1440 *data++ = s->num_toggled;
1441 *data++ = s->num_resets;
/* Copy registers [start, end] (byte offsets, inclusive) into the dump
 * buffer at their natural offsets, one u32 per register.
 */
1444 static inline void reg_block_dump(struct adapter *ap, void *buf,
1445 unsigned int start, unsigned int end)
1447 u32 *p = buf + start;
1449 for (; start <= end; start += sizeof(u32))
1450 *p++ = t3_read_reg(ap, start);
/* ethtool .get_regs: dump selected register ranges into @buf
 * (T3_REGMAP_SIZE bytes, pre-zeroed so skipped ranges read as 0).
 */
1453 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1456 struct port_info *pi = netdev_priv(dev);
1457 struct adapter *ap = pi->adapter;
/* Version word layout, decoded by userspace tools: */
1461 * bits 0..9: chip version
1462 * bits 10..15: chip revision
1463 * bit 31: set for PCIe cards
1465 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1468 * We skip the MAC statistics registers because they are clear-on-read.
1469 * Also reading multi-register stats would need to synchronize with the
1470 * periodic mac stats accumulation. Hard to justify the complexity.
1472 memset(buf, 0, T3_REGMAP_SIZE);
1473 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1474 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1475 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1476 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1477 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1478 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1479 XGM_REG(A_XGM_SERDES_STAT3, 1));
1480 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1481 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
/* ethtool .nway_reset: restart autonegotiation on the port's PHY.
 * Only valid when the interface is up and autoneg is enabled
 * (the elided lines presumably return errors for those cases).
 */
1484 static int restart_autoneg(struct net_device *dev)
1486 struct port_info *p = netdev_priv(dev);
1488 if (!netif_running(dev))
1490 if (p->link_config.autoneg != AUTONEG_ENABLE)
1492 p->phy.ops->autoneg_restart(&p->phy);
/* ethtool .phys_id: blink the adapter LED (GPIO0) for @data seconds to
 * identify the card, then restore the GPIO state.
 */
1496 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1498 struct port_info *pi = netdev_priv(dev);
1499 struct adapter *adapter = pi->adapter;
/* Toggle twice per second: data * 2 half-periods of 500 ms each. */
1505 for (i = 0; i < data * 2; i++) {
1506 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1507 (i & 1) ? F_GPIO0_OUT_VAL : 0);
/* Bail out early if the sleeping task catches a signal. */
1508 if (msleep_interruptible(500))
/* Restore the LED before returning. */
1511 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
/* ethtool .get_settings: report link capabilities and, when the carrier
 * is up, the negotiated speed/duplex.
 */
1516 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1518 struct port_info *p = netdev_priv(dev);
1520 cmd->supported = p->link_config.supported;
1521 cmd->advertising = p->link_config.advertising;
1523 if (netif_carrier_ok(dev)) {
1524 cmd->speed = p->link_config.speed;
1525 cmd->duplex = p->link_config.duplex;
/* NOTE(review): the else branch (no carrier -> unknown speed/duplex)
 * is elided from this chunk.
 */
1531 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1532 cmd->phy_address = p->phy.addr;
1533 cmd->transceiver = XCVR_EXTERNAL;
1534 cmd->autoneg = p->link_config.autoneg;
/* Map a (speed, duplex) pair to the corresponding SUPPORTED_* capability
 * bit; used to validate forced-speed requests in set_settings().
 * NOTE(review): the switch statement framing these cases is elided.
 */
1540 static int speed_duplex_to_caps(int speed, int duplex)
1546 if (duplex == DUPLEX_FULL)
1547 cap = SUPPORTED_10baseT_Full;
1549 cap = SUPPORTED_10baseT_Half;
1552 if (duplex == DUPLEX_FULL)
1553 cap = SUPPORTED_100baseT_Full;
1555 cap = SUPPORTED_100baseT_Half;
1558 if (duplex == DUPLEX_FULL)
1559 cap = SUPPORTED_1000baseT_Full;
1561 cap = SUPPORTED_1000baseT_Half;
/* 10G is full duplex only. */
1564 if (duplex == DUPLEX_FULL)
1565 cap = SUPPORTED_10000baseT_Full;
/* All speed/duplex ADVERTISED_* bits this driver understands. */
1570 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1571 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1572 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1573 ADVERTISED_10000baseT_Full)
/* ethtool .set_settings: apply a forced speed/duplex or a new advertising
 * mask, then restart the link if the interface is up.
 */
1575 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1578 struct port_info *p = netdev_priv(dev);
1579 struct link_config *lc = &p->link_config;
1581 if (!(lc->supported & SUPPORTED_Autoneg)) {
/* PHY has a fixed speed/duplex; accept the request only if it
 * matches what the hardware can do.
 */
1583 * PHY offers a single speed/duplex. See if that's what's
1586 if (cmd->autoneg == AUTONEG_DISABLE) {
1587 cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1588 if (lc->supported & cap)
1594 if (cmd->autoneg == AUTONEG_DISABLE) {
1595 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
/* Forcing 1G is not supported on these adapters. */
1597 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1599 lc->requested_speed = cmd->speed;
1600 lc->requested_duplex = cmd->duplex;
1601 lc->advertising = 0;
/* Autoneg: restrict advertising to what we support. */
1603 cmd->advertising &= ADVERTISED_MASK;
1604 cmd->advertising &= lc->supported;
1605 if (!cmd->advertising)
1607 lc->requested_speed = SPEED_INVALID;
1608 lc->requested_duplex = DUPLEX_INVALID;
1609 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1611 lc->autoneg = cmd->autoneg;
1612 if (netif_running(dev))
1613 t3_link_start(&p->phy, &p->mac, lc);
/* ethtool .get_pauseparam: report flow-control autoneg and current
 * Rx/Tx pause state for the port.
 */
1617 static void get_pauseparam(struct net_device *dev,
1618 struct ethtool_pauseparam *epause)
1620 struct port_info *p = netdev_priv(dev);
1622 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1623 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1624 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
/* ethtool .set_pauseparam: update the requested flow-control settings;
 * renegotiate the link when autoneg is on, otherwise program the MAC
 * directly.
 */
1627 static int set_pauseparam(struct net_device *dev,
1628 struct ethtool_pauseparam *epause)
1630 struct port_info *p = netdev_priv(dev);
1631 struct link_config *lc = &p->link_config;
1633 if (epause->autoneg == AUTONEG_DISABLE)
1634 lc->requested_fc = 0;
1635 else if (lc->supported & SUPPORTED_Autoneg)
1636 lc->requested_fc = PAUSE_AUTONEG;
/* NOTE(review): the else (autoneg requested but unsupported -> error)
 * branch is elided from this chunk.
 */
1640 if (epause->rx_pause)
1641 lc->requested_fc |= PAUSE_RX;
1642 if (epause->tx_pause)
1643 lc->requested_fc |= PAUSE_TX;
1644 if (lc->autoneg == AUTONEG_ENABLE) {
1645 if (netif_running(dev))
1646 t3_link_start(&p->phy, &p->mac, lc);
1648 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1649 if (netif_running(dev))
/* -1 speed/duplex means "leave unchanged", only update fc. */
1650 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
/* ethtool .get_rx_csum: report whether Rx checksum offload is enabled. */
1655 static u32 get_rx_csum(struct net_device *dev)
1657 struct port_info *p = netdev_priv(dev);
1659 return p->rx_csum_offload;
/* ethtool .set_rx_csum: toggle Rx checksum offload.  When disabling,
 * LRO is turned off on all of the port's qsets as well, since LRO
 * depends on valid hardware checksums.
 */
1662 static int set_rx_csum(struct net_device *dev, u32 data)
1664 struct port_info *p = netdev_priv(dev);
1666 p->rx_csum_offload = data;
/* NOTE(review): the "if (!data)" guard around this loop is elided. */
1670 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1671 set_qset_lro(dev, i, 0);
/* ethtool .get_ringparam: report ring size limits and the current sizes
 * of the port's first queue set (rx_mini_* is reused for the response
 * queue, rx_jumbo_* for the jumbo free list).
 */
1676 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1678 struct port_info *pi = netdev_priv(dev);
1679 struct adapter *adapter = pi->adapter;
1680 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1682 e->rx_max_pending = MAX_RX_BUFFERS;
1683 e->rx_mini_max_pending = 0;
1684 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1685 e->tx_max_pending = MAX_TXQ_ENTRIES;
1687 e->rx_pending = q->fl_size;
1688 e->rx_mini_pending = q->rspq_size;
1689 e->rx_jumbo_pending = q->jumbo_size;
1690 e->tx_pending = q->txq_size[0];
/* ethtool .set_ringparam: validate and apply new ring sizes to all of
 * the port's queue sets.  Rejected after full initialization because
 * the rings are already allocated.
 */
1693 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1695 struct port_info *pi = netdev_priv(dev);
1696 struct adapter *adapter = pi->adapter;
1697 struct qset_params *q;
/* Range-check every requested size before touching anything. */
1700 if (e->rx_pending > MAX_RX_BUFFERS ||
1701 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1702 e->tx_pending > MAX_TXQ_ENTRIES ||
1703 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1704 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1705 e->rx_pending < MIN_FL_ENTRIES ||
1706 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1707 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
/* Can't resize live rings. */
1710 if (adapter->flags & FULL_INIT_DONE)
1713 q = &adapter->params.sge.qset[pi->first_qset];
1714 for (i = 0; i < pi->nqsets; ++i, ++q) {
1715 q->rspq_size = e->rx_mini_pending;
1716 q->fl_size = e->rx_pending;
1717 q->jumbo_size = e->rx_jumbo_pending;
/* All three Tx queues (eth, offload, ctrl) get the same size. */
1718 q->txq_size[0] = e->tx_pending;
1719 q->txq_size[1] = e->tx_pending;
1720 q->txq_size[2] = e->tx_pending;
/* ethtool .set_coalesce: set the interrupt holdoff timer.  Only qset 0
 * is updated here; the value is stored in microseconds and the hardware
 * timer has ~100 ns granularity, hence the *10 bound check.
 */
1725 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1727 struct port_info *pi = netdev_priv(dev);
1728 struct adapter *adapter = pi->adapter;
1729 struct qset_params *qsp = &adapter->params.sge.qset[0];
1730 struct sge_qset *qs = &adapter->sge.qs[0];
1732 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1735 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1736 t3_update_qset_coalesce(qs, qsp);
/* ethtool .get_coalesce: report the holdoff timer of qset 0. */
1740 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1742 struct port_info *pi = netdev_priv(dev);
1743 struct adapter *adapter = pi->adapter;
1744 struct qset_params *q = adapter->params.sge.qset;
1746 c->rx_coalesce_usecs = q->coalesce_usecs;
/* ethtool .get_eeprom: read the requested window of the serial EEPROM
 * into @data.  Reads proceed in aligned 4-byte words into a scratch
 * buffer, then the exact [offset, offset+len) slice is copied out.
 */
1750 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1753 struct port_info *pi = netdev_priv(dev);
1754 struct adapter *adapter = pi->adapter;
1757 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
/* NOTE(review): the NULL check for buf is elided from this chunk. */
1761 e->magic = EEPROM_MAGIC;
1762 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1763 err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);
1766 memcpy(data, buf + e->offset, e->len);
/* ethtool .set_eeprom: write @data to the serial EEPROM.  Unaligned
 * head/tail bytes are handled by read-modify-write of the surrounding
 * aligned words; write protection is lifted for the duration.
 */
1771 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1774 struct port_info *pi = netdev_priv(dev);
1775 struct adapter *adapter = pi->adapter;
1776 u32 aligned_offset, aligned_len;
1781 if (eeprom->magic != EEPROM_MAGIC)
/* Round the window out to 4-byte alignment. */
1784 aligned_offset = eeprom->offset & ~3;
1785 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1787 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1788 buf = kmalloc(aligned_len, GFP_KERNEL);
/* Pre-read the partial first and last words so untouched bytes
 * in them are preserved by the write below.
 */
1791 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1792 if (!err && aligned_len > 4)
1793 err = t3_seeprom_read(adapter,
1794 aligned_offset + aligned_len - 4,
1795 (__le32 *) & buf[aligned_len - 4]);
1798 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
/* Disable write protection, write all words, re-enable it. */
1802 err = t3_seeprom_wp(adapter, 0);
1806 for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1807 err = t3_seeprom_write(adapter, aligned_offset, *p);
1808 aligned_offset += 4;
1812 err = t3_seeprom_wp(adapter, 1);
/* ethtool .get_wol: Wake-on-LAN is not supported; report nothing. */
1819 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1823 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool .set_flags: toggle LRO on all of the port's qsets.  Enabling
 * LRO requires Rx checksum offload to be on.
 */
1826 static int cxgb3_set_flags(struct net_device *dev, u32 data)
1828 struct port_info *pi = netdev_priv(dev);
1831 if (data & ETH_FLAG_LRO) {
1832 if (!pi->rx_csum_offload)
1835 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1836 set_qset_lro(dev, i, 1);
/* NOTE(review): the else framing this disable path is elided. */
1839 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1840 set_qset_lro(dev, i, 0);
/* ethtool operations table for cxgb3 net devices. */
1845 static const struct ethtool_ops cxgb_ethtool_ops = {
1846 .get_settings = get_settings,
1847 .set_settings = set_settings,
1848 .get_drvinfo = get_drvinfo,
1849 .get_msglevel = get_msglevel,
1850 .set_msglevel = set_msglevel,
1851 .get_ringparam = get_sge_param,
1852 .set_ringparam = set_sge_param,
1853 .get_coalesce = get_coalesce,
1854 .set_coalesce = set_coalesce,
1855 .get_eeprom_len = get_eeprom_len,
1856 .get_eeprom = get_eeprom,
1857 .set_eeprom = set_eeprom,
1858 .get_pauseparam = get_pauseparam,
1859 .set_pauseparam = set_pauseparam,
1860 .get_rx_csum = get_rx_csum,
1861 .set_rx_csum = set_rx_csum,
1862 .set_tx_csum = ethtool_op_set_tx_csum,
1863 .set_sg = ethtool_op_set_sg,
1864 .get_link = ethtool_op_get_link,
1865 .get_strings = get_strings,
1866 .phys_id = cxgb3_phys_id,
1867 .nway_reset = restart_autoneg,
1868 .get_sset_count = get_sset_count,
1869 .get_ethtool_stats = get_stats,
1870 .get_regs_len = get_regs_len,
1871 .get_regs = get_regs,
1873 .set_tso = ethtool_op_set_tso,
1874 .get_flags = ethtool_op_get_flags,
1875 .set_flags = cxgb3_set_flags,
/* True if @val is in [lo, hi] or is negative.  Negative values mean
 * "parameter not supplied" in the extension-ioctl structures, so they
 * always pass range validation.
 */
1878 static int in_range(int val, int lo, int hi)
1880 return val < 0 || (val <= hi && val >= lo);
/* Handler for the Chelsio private (SIOCCHIOCTL-style) ioctl commands.
 * Each case copies a command-specific structure from userspace,
 * validates it, and applies/returns the requested configuration.
 * NOTE(review): many lines (error returns, closing braces, some
 * declarations) are elided from this chunk.
 */
1883 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1885 struct port_info *pi = netdev_priv(dev);
1886 struct adapter *adapter = pi->adapter;
1890 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
/* Configure one queue set's ring sizes, interrupt latency, polling
 * mode, congestion threshold and LRO.  Negative fields are skipped.
 */
1894 case CHELSIO_SET_QSET_PARAMS:{
1896 struct qset_params *q;
1897 struct ch_qset_params t;
1898 int q1 = pi->first_qset;
1899 int nqsets = pi->nqsets;
1901 if (!capable(CAP_NET_ADMIN))
1903 if (copy_from_user(&t, useraddr, sizeof(t)))
1905 if (t.qset_idx >= SGE_QSETS)
/* Validate every supplied value (negative = "not supplied"). */
1907 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1908 !in_range(t.cong_thres, 0, 255) ||
1909 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1911 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1913 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1914 MAX_CTRL_TXQ_ENTRIES) ||
1915 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1917 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1918 MAX_RX_JUMBO_BUFFERS)
1919 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
/* LRO may only be enabled on qsets whose owning port has Rx
 * checksum offload enabled.
 */
1923 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1924 for_each_port(adapter, i) {
1925 pi = adap2pinfo(adapter, i);
1926 if (t.qset_idx >= pi->first_qset &&
1927 t.qset_idx < pi->first_qset + pi->nqsets &&
1928 !pi->rx_csum_offload)
/* Ring sizes etc. cannot change once the rings exist. */
1932 if ((adapter->flags & FULL_INIT_DONE) &&
1933 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1934 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1935 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1936 t.polling >= 0 || t.cong_thres >= 0))
1939 /* Allow setting of any available qset when offload enabled */
1940 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1942 for_each_port(adapter, i) {
1943 pi = adap2pinfo(adapter, i);
1944 nqsets += pi->first_qset + pi->nqsets;
/* Reject indices outside [q1, q1 + nqsets). */
1948 if (t.qset_idx < q1)
1950 if (t.qset_idx > q1 + nqsets - 1)
1953 q = &adapter->params.sge.qset[t.qset_idx];
/* Apply only the fields the caller supplied (>= 0). */
1955 if (t.rspq_size >= 0)
1956 q->rspq_size = t.rspq_size;
1957 if (t.fl_size[0] >= 0)
1958 q->fl_size = t.fl_size[0];
1959 if (t.fl_size[1] >= 0)
1960 q->jumbo_size = t.fl_size[1];
1961 if (t.txq_size[0] >= 0)
1962 q->txq_size[0] = t.txq_size[0];
1963 if (t.txq_size[1] >= 0)
1964 q->txq_size[1] = t.txq_size[1];
1965 if (t.txq_size[2] >= 0)
1966 q->txq_size[2] = t.txq_size[2];
1967 if (t.cong_thres >= 0)
1968 q->cong_thres = t.cong_thres;
1969 if (t.intr_lat >= 0) {
1970 struct sge_qset *qs =
1971 &adapter->sge.qs[t.qset_idx];
1973 q->coalesce_usecs = t.intr_lat;
1974 t3_update_qset_coalesce(qs, q);
1976 if (t.polling >= 0) {
1977 if (adapter->flags & USING_MSIX)
1978 q->polling = t.polling;
1980 /* No polling with INTx for T3A */
1981 if (adapter->params.rev == 0 &&
1982 !(adapter->flags & USING_MSI))
/* With a shared interrupt, polling mode must be uniform
 * across all queue sets.
 */
1985 for (i = 0; i < SGE_QSETS; i++) {
1986 q = &adapter->params.sge.
1988 q->polling = t.polling;
1993 set_qset_lro(dev, t.qset_idx, t.lro);
/* Return the current parameters of one queue set. */
1997 case CHELSIO_GET_QSET_PARAMS:{
1998 struct qset_params *q;
1999 struct ch_qset_params t;
2000 int q1 = pi->first_qset;
2001 int nqsets = pi->nqsets;
2004 if (copy_from_user(&t, useraddr, sizeof(t)))
2007 /* Display qsets for all ports when offload enabled */
2008 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2010 for_each_port(adapter, i) {
2011 pi = adap2pinfo(adapter, i);
2012 nqsets = pi->first_qset + pi->nqsets;
2016 if (t.qset_idx >= nqsets)
2019 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2020 t.rspq_size = q->rspq_size;
2021 t.txq_size[0] = q->txq_size[0];
2022 t.txq_size[1] = q->txq_size[1];
2023 t.txq_size[2] = q->txq_size[2];
2024 t.fl_size[0] = q->fl_size;
2025 t.fl_size[1] = q->jumbo_size;
2026 t.polling = q->polling;
2028 t.intr_lat = q->coalesce_usecs;
2029 t.cong_thres = q->cong_thres;
/* Report the interrupt vector servicing this qset. */
2032 if (adapter->flags & USING_MSIX)
2033 t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2035 t.vector = adapter->pdev->irq;
2037 if (copy_to_user(useraddr, &t, sizeof(t)))
/* Change how many queue sets this port owns; re-pack first_qset
 * assignments across all ports afterwards.
 */
2041 case CHELSIO_SET_QSET_NUM:{
2042 struct ch_reg edata;
2043 unsigned int i, first_qset = 0, other_qsets = 0;
2045 if (!capable(CAP_NET_ADMIN))
2047 if (adapter->flags & FULL_INIT_DONE)
2049 if (copy_from_user(&edata, useraddr, sizeof(edata)))
/* More than one qset per port requires MSI-X. */
2051 if (edata.val < 1 ||
2052 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2055 for_each_port(adapter, i)
2056 if (adapter->port[i] && adapter->port[i] != dev)
2057 other_qsets += adap2pinfo(adapter, i)->nqsets;
2059 if (edata.val + other_qsets > SGE_QSETS)
2062 pi->nqsets = edata.val;
2064 for_each_port(adapter, i)
2065 if (adapter->port[i]) {
2066 pi = adap2pinfo(adapter, i);
2067 pi->first_qset = first_qset;
2068 first_qset += pi->nqsets;
/* Report how many queue sets this port owns. */
2072 case CHELSIO_GET_QSET_NUM:{
2073 struct ch_reg edata;
2075 edata.cmd = CHELSIO_GET_QSET_NUM;
2076 edata.val = pi->nqsets;
2077 if (copy_to_user(useraddr, &edata, sizeof(edata)))
/* Load a firmware image supplied by userspace into the adapter. */
2081 case CHELSIO_LOAD_FW:{
2083 struct ch_mem_range t;
2085 if (!capable(CAP_SYS_RAWIO))
2087 if (copy_from_user(&t, useraddr, sizeof(t)))
2089 /* Check t.len sanity ? */
2090 fw_data = kmalloc(t.len, GFP_KERNEL);
2095 (fw_data, useraddr + sizeof(t), t.len)) {
2100 ret = t3_load_fw(adapter, fw_data, t.len);
/* Replace the offload MTU table (ascending, min 81 for SACK). */
2106 case CHELSIO_SETMTUTAB:{
2110 if (!is_offload(adapter))
2112 if (!capable(CAP_NET_ADMIN))
2114 if (offload_running(adapter))
2116 if (copy_from_user(&m, useraddr, sizeof(m)))
2118 if (m.nmtus != NMTUS)
2120 if (m.mtus[0] < 81) /* accommodate SACK */
2123 /* MTUs must be in ascending order */
2124 for (i = 1; i < NMTUS; ++i)
2125 if (m.mtus[i] < m.mtus[i - 1])
2128 memcpy(adapter->params.mtus, m.mtus,
2129 sizeof(adapter->params.mtus));
/* Report payload-memory (PM) configuration. */
2132 case CHELSIO_GET_PM:{
2133 struct tp_params *p = &adapter->params.tp;
2134 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2136 if (!is_offload(adapter))
2138 m.tx_pg_sz = p->tx_pg_size;
2139 m.tx_num_pg = p->tx_num_pgs;
2140 m.rx_pg_sz = p->rx_pg_size;
2141 m.rx_num_pg = p->rx_num_pgs;
2142 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2143 if (copy_to_user(useraddr, &m, sizeof(m)))
/* Reconfigure payload-memory page sizes/counts (pre-init only). */
2147 case CHELSIO_SET_PM:{
2149 struct tp_params *p = &adapter->params.tp;
2151 if (!is_offload(adapter))
2153 if (!capable(CAP_NET_ADMIN))
2155 if (adapter->flags & FULL_INIT_DONE)
2157 if (copy_from_user(&m, useraddr, sizeof(m)))
2159 if (!is_power_of_2(m.rx_pg_sz) ||
2160 !is_power_of_2(m.tx_pg_sz))
2161 return -EINVAL; /* not power of 2 */
/* Bit masks restrict page sizes to the hardware-supported set. */
2162 if (!(m.rx_pg_sz & 0x14000))
2163 return -EINVAL; /* not 16KB or 64KB */
2164 if (!(m.tx_pg_sz & 0x1554000))
/* -1 means "keep the current page count". */
2166 if (m.tx_num_pg == -1)
2167 m.tx_num_pg = p->tx_num_pgs;
2168 if (m.rx_num_pg == -1)
2169 m.rx_num_pg = p->rx_num_pgs;
2170 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2172 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2173 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2175 p->rx_pg_size = m.rx_pg_sz;
2176 p->tx_pg_size = m.tx_pg_sz;
2177 p->rx_num_pgs = m.rx_num_pg;
2178 p->tx_num_pgs = m.tx_num_pg;
/* Read a range of one of the adapter memories (CM/PMRX/PMTX)
 * back to userspace in small chunks.
 */
2181 case CHELSIO_GET_MEM:{
2182 struct ch_mem_range t;
2186 if (!is_offload(adapter))
2188 if (!(adapter->flags & FULL_INIT_DONE))
2189 return -EIO; /* need the memory controllers */
2190 if (copy_from_user(&t, useraddr, sizeof(t)))
/* Addresses and lengths must be 8-byte aligned. */
2192 if ((t.addr & 7) || (t.len & 7))
2194 if (t.mem_id == MEM_CM)
2196 else if (t.mem_id == MEM_PMRX)
2197 mem = &adapter->pmrx;
2198 else if (t.mem_id == MEM_PMTX)
2199 mem = &adapter->pmtx;
/* Version word layout (same scheme as get_regs): */
2205 * bits 0..9: chip version
2206 * bits 10..15: chip revision
2208 t.version = 3 | (adapter->params.rev << 10);
2209 if (copy_to_user(useraddr, &t, sizeof(t)))
2213 * Read 256 bytes at a time as len can be large and we don't
2214 * want to use huge intermediate buffers.
2216 useraddr += sizeof(t); /* advance to start of buffer */
2218 unsigned int chunk =
2219 min_t(unsigned int, t.len, sizeof(buf));
2222 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2226 if (copy_to_user(useraddr, buf, chunk))
/* Install a packet-trace filter (requires running offload). */
2234 case CHELSIO_SET_TRACE_FILTER:{
2236 const struct trace_params *tp;
2238 if (!capable(CAP_NET_ADMIN))
2240 if (!offload_running(adapter))
2242 if (copy_from_user(&t, useraddr, sizeof(t)))
/* The trace_params layout is overlaid on the ioctl struct
 * starting at the sip field.
 */
2245 tp = (const struct trace_params *)&t.sip;
2247 t3_config_trace_filter(adapter, tp, 0,
2251 t3_config_trace_filter(adapter, tp, 1,
/* Net-device ioctl entry point: handles the standard MII register
 * read/write requests (clause-45 style MMD access on 10G PHYs,
 * clause-22 otherwise) and forwards private commands to
 * cxgb_extension_ioctl().  NOTE(review): the switch framing and several
 * error paths are elided from this chunk.
 */
2262 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2264 struct mii_ioctl_data *data = if_mii(req);
2265 struct port_info *pi = netdev_priv(dev);
2266 struct adapter *adapter = pi->adapter;
/* SIOCGMIIPHY: report the PHY address. */
2271 data->phy_id = pi->phy.addr;
/* SIOCGMIIREG: read a PHY register. */
2275 struct cphy *phy = &pi->phy;
2277 if (!phy->mdio_read)
2279 if (is_10G(adapter)) {
/* Clause 45: the MMD is encoded in the upper phy_id byte. */
2280 mmd = data->phy_id >> 8;
2283 else if (mmd > MDIO_DEV_VEND2)
2287 phy->mdio_read(adapter, data->phy_id & 0x1f,
2288 mmd, data->reg_num, &val);
/* Clause 22: no MMD, 5-bit register number. */
2291 phy->mdio_read(adapter, data->phy_id & 0x1f,
2292 0, data->reg_num & 0x1f,
2295 data->val_out = val;
/* SIOCSMIIREG: write a PHY register (privileged). */
2299 struct cphy *phy = &pi->phy;
2301 if (!capable(CAP_NET_ADMIN))
2303 if (!phy->mdio_write)
2305 if (is_10G(adapter)) {
2306 mmd = data->phy_id >> 8;
2309 else if (mmd > MDIO_DEV_VEND2)
2313 phy->mdio_write(adapter,
2314 data->phy_id & 0x1f, mmd,
2319 phy->mdio_write(adapter,
2320 data->phy_id & 0x1f, 0,
2321 data->reg_num & 0x1f,
/* Chelsio-private commands. */
2326 return cxgb_extension_ioctl(dev, req->ifr_data);
/* Net-device .ndo_change_mtu: program the MAC with the new MTU and, on
 * T3 rev-0 parts with offload running, reload the congestion MTU table.
 */
2333 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2335 struct port_info *pi = netdev_priv(dev);
2336 struct adapter *adapter = pi->adapter;
/* Minimum of 81 bytes so TCP SACK options always fit. */
2339 if (new_mtu < 81) /* accommodate SACK */
2341 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2344 init_port_mtus(adapter);
2345 if (adapter->params.rev == 0 && offload_running(adapter))
2346 t3_load_mtus(adapter, adapter->params.mtus,
2347 adapter->params.a_wnd, adapter->params.b_wnd,
2348 adapter->port[0]->mtu);
/* Net-device .ndo_set_mac_address: validate and program a new MAC
 * address, updating the SMT entry when offload is active.
 */
2352 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2354 struct port_info *pi = netdev_priv(dev);
2355 struct adapter *adapter = pi->adapter;
2356 struct sockaddr *addr = p;
2358 if (!is_valid_ether_addr(addr->sa_data))
2361 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2362 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2363 if (offload_running(adapter))
2364 write_smt_entry(adapter, pi->port_id);
2369 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2370 * @adap: the adapter
2373 * Ensures that current Rx processing on any of the queues associated with
2374 * the given port completes before returning. We do this by acquiring and
2375 * releasing the locks of the response queues associated with the port.
2377 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2381 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2382 struct sge_rspq *q = &adap->sge.qs[i].rspq;
/* Lock/unlock acts as a barrier: any in-flight Rx handler holding
 * the lock has finished once we acquire it.
 */
2384 spin_lock_irq(&q->lock);
2385 spin_unlock_irq(&q->lock);
/* VLAN .ndo_vlan_rx_register: enable/disable hardware VLAN extraction
 * for this port.  Rev-0 chips have a single control shared by all ports,
 * so acceleration stays on while any port still has a vlan group.
 */
2389 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2391 struct port_info *pi = netdev_priv(dev);
2392 struct adapter *adapter = pi->adapter;
2395 if (adapter->params.rev > 0)
2396 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2398 /* single control for all ports */
2399 unsigned int i, have_vlans = 0;
2400 for_each_port(adapter, i)
2401 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2403 t3_set_vlan_accel(adapter, 1, have_vlans);
/* Make sure no Rx path still uses the old vlan_grp. */
2405 t3_synchronize_rx(adapter, pi);
2408 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: directly invoke the interrupt handler for each of the
 * port's queue sets so netconsole etc. can make progress with normal
 * interrupts unavailable.
 */
2409 static void cxgb_netpoll(struct net_device *dev)
2411 struct port_info *pi = netdev_priv(dev);
2412 struct adapter *adapter = pi->adapter;
2415 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2416 struct sge_qset *qs = &adapter->sge.qs[qidx];
/* With MSI-X each qset has its own handler argument (the elided
 * lines presumably select qs vs. adapter as source).
 */
2419 if (adapter->flags & USING_MSIX)
2424 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2430 * Periodic accumulation of MAC statistics.
/* Walks all running ports, accumulating their clear-on-read MAC counters
 * under stats_lock so they don't overflow between reads.
 */
2432 static void mac_stats_update(struct adapter *adapter)
2436 for_each_port(adapter, i) {
2437 struct net_device *dev = adapter->port[i];
2438 struct port_info *p = netdev_priv(dev);
2440 if (netif_running(dev)) {
2441 spin_lock(&adapter->stats_lock);
2442 t3_mac_update_stats(&p->mac);
2443 spin_unlock(&adapter->stats_lock);
/* Poll link state on PHYs that cannot raise interrupts. */
2448 static void check_link_status(struct adapter *adapter)
2452 for_each_port(adapter, i) {
2453 struct net_device *dev = adapter->port[i];
2454 struct port_info *p = netdev_priv(dev);
2456 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
2457 t3_link_changed(adapter, i);
/* T3B2 MAC watchdog: detect hung MACs and either toggle TX-enable
 * (status 1) or fully reset and reprogram the MAC (status 2).
 */
2461 static void check_t3b2_mac(struct adapter *adapter)
/* rtnl guards against a concurrent ifdown tearing the port away. */
2465 if (!rtnl_trylock()) /* synchronize with ifdown */
2468 for_each_port(adapter, i) {
2469 struct net_device *dev = adapter->port[i];
2470 struct port_info *p = netdev_priv(dev);
2473 if (!netif_running(dev))
2477 if (netif_running(dev) && netif_carrier_ok(dev))
2478 status = t3b2_mac_watchdog_task(&p->mac);
2480 p->mac.stats.num_toggled++;
2481 else if (status == 2) {
2482 struct cmac *mac = &p->mac;
/* Full MAC reinit: restore MTU, address, Rx mode, link,
 * then re-enable the MAC and its interrupts.
 */
2484 t3_mac_set_mtu(mac, dev->mtu);
2485 t3_mac_set_address(mac, 0, dev->dev_addr);
2486 cxgb_set_rxmode(dev);
2487 t3_link_start(&p->phy, mac, &p->link_config);
2488 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2489 t3_port_intr_enable(adapter, p->port_id);
2490 p->mac.stats.num_resets++;
/* Periodic housekeeping work item: polls link state (for IRQ-less PHYs),
 * accumulates MAC stats, runs the T3B2 MAC watchdog, and reschedules
 * itself while any port is open.
 */
2497 static void t3_adap_check_task(struct work_struct *work)
2499 struct adapter *adapter = container_of(work, struct adapter,
2500 adap_check_task.work)
2501 const struct adapter_params *p = &adapter->params;
2503 adapter->check_task_cnt++;
2505 /* Check link status for PHYs without interrupts */
2506 if (p->linkpoll_period)
2507 check_link_status(adapter);
2509 /* Accumulate MAC stats if needed */
/* Stats run every invocation when link polling is off, otherwise at
 * the configured stats_update_period (linkpoll_period is in tenths).
 */
2510 if (!p->linkpoll_period ||
2511 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2512 p->stats_update_period) {
2513 mac_stats_update(adapter);
2514 adapter->check_task_cnt = 0;
2517 if (p->rev == T3_REV_B2)
2518 check_t3b2_mac(adapter);
2520 /* Schedule the next check update if any port is active. */
2521 spin_lock_irq(&adapter->work_lock);
2522 if (adapter->open_device_map & PORT_MASK)
2523 schedule_chk_task(adapter);
2524 spin_unlock_irq(&adapter->work_lock);
2528 * Processes external (PHY) interrupts in process context.
2530 static void ext_intr_task(struct work_struct *work)
2532 struct adapter *adapter = container_of(work, struct adapter,
2533 ext_intr_handler_task);
2535 t3_phy_intr_handler(adapter);
2537 /* Now reenable external interrupts */
/* Re-add F_T3DBG (cleared by t3_os_ext_intr_handler) to the slow
 * interrupt mask, acknowledging any pending cause first.
 */
2538 spin_lock_irq(&adapter->work_lock);
2539 if (adapter->slow_intr_mask) {
2540 adapter->slow_intr_mask |= F_T3DBG;
2541 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2542 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2543 adapter->slow_intr_mask);
2545 spin_unlock_irq(&adapter->work_lock);
2549 * Interrupt-context handler for external (PHY) interrupts.
2551 void t3_os_ext_intr_handler(struct adapter *adapter)
2554 * Schedule a task to handle external interrupts as they may be slow
2555 * and we use a mutex to protect MDIO registers. We disable PHY
2556 * interrupts in the meantime and let the task reenable them when
2559 spin_lock(&adapter->work_lock);
/* slow_intr_mask == 0 means the adapter is being shut down; skip. */
2560 if (adapter->slow_intr_mask) {
2561 adapter->slow_intr_mask &= ~F_T3DBG;
2562 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2563 adapter->slow_intr_mask);
2564 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2566 spin_unlock(&adapter->work_lock);
/* Quiesce the adapter after a fatal/PCI error: stop all ports, close
 * offload, stop SGE timers, and optionally reset the chip (@reset).
 * On reset failure the PCI device is disabled.
 */
2569 static int t3_adapter_error(struct adapter *adapter, int reset)
2573 /* Stop all ports */
2574 for_each_port(adapter, i) {
2575 struct net_device *netdev = adapter->port[i];
2577 if (netif_running(netdev))
2581 if (is_offload(adapter) &&
2582 test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
2583 offload_close(&adapter->tdev);
2585 /* Stop SGE timers */
2586 t3_stop_sge_timers(adapter);
2588 adapter->flags &= ~FULL_INIT_DONE;
2591 ret = t3_reset_adapter(adapter);
2593 pci_disable_device(adapter->pdev);
/* Bring the PCI device back after a reset: re-enable, restore state,
 * free stale SGE resources, and re-run the replay/prep sequence.
 * Returns nonzero on failure (per the elided return paths).
 */
2598 static int t3_reenable_adapter(struct adapter *adapter)
2600 if (pci_enable_device(adapter->pdev)) {
2601 dev_err(&adapter->pdev->dev,
2602 "Cannot re-enable PCI device after reset.\n");
2605 pci_set_master(adapter->pdev);
2606 pci_restore_state(adapter->pdev);
2608 /* Free sge resources */
2609 t3_free_sge_resources(adapter);
2611 if (t3_replay_prep_adapter(adapter))
/* Reopen every port that was running before the error/reset. */
2619 static void t3_resume_ports(struct adapter *adapter)
2623 /* Restart the ports */
2624 for_each_port(adapter, i) {
2625 struct net_device *netdev = adapter->port[i];
2627 if (netif_running(netdev)) {
2628 if (cxgb_open(netdev)) {
2629 dev_err(&adapter->pdev->dev,
2630 "can't bring device back up"
2639 * processes a fatal error.
2640 * Bring the ports down, reset the chip, bring the ports back up.
2642 static void fatal_error_task(struct work_struct *work)
2644 struct adapter *adapter = container_of(work, struct adapter,
2645 fatal_error_handler_task);
/* Full recovery sequence; each step only runs if the previous one
 * succeeded (per the elided err checks).
 */
2649 err = t3_adapter_error(adapter, 1);
2651 err = t3_reenable_adapter(adapter);
2653 t3_resume_ports(adapter);
2655 CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
/* Immediate response to a fatal hardware error: halt the SGE and both
 * MACs, disable interrupts, and hand off full recovery to
 * fatal_error_task.  Logs the firmware status words for diagnosis.
 */
2659 void t3_fatal_err(struct adapter *adapter)
2661 unsigned int fw_status[4];
2663 if (adapter->flags & FULL_INIT_DONE) {
2664 t3_sge_stop(adapter);
/* Silence Tx/Rx on both XGMACs (port 0 and port 1 register copies). */
2665 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2666 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2667 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2668 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2670 spin_lock(&adapter->work_lock);
2671 t3_intr_disable(adapter);
2672 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2673 spin_unlock(&adapter->work_lock);
2675 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2676 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2677 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2678 fw_status[0], fw_status[1],
2679 fw_status[2], fw_status[3]);
2684 * t3_io_error_detected - called when PCI error is detected
2685 * @pdev: Pointer to PCI device
2686 * @state: The current pci connection state
2688 * This function is called after a PCI bus error affecting
2689 * this device has been detected.
2691 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2692 pci_channel_state_t state)
2694 struct adapter *adapter = pci_get_drvdata(pdev);
/* Quiesce without chip reset (reset = 0); the AER core will reset
 * the slot and call t3_io_slot_reset afterwards.
 */
2697 ret = t3_adapter_error(adapter, 0);
2699 /* Request a slot reset. */
2700 return PCI_ERS_RESULT_NEED_RESET;
2704 * t3_io_slot_reset - called after the pci bus has been reset.
2705 * @pdev: Pointer to PCI device
2707 * Restart the card from scratch, as if from a cold-boot.
2709 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2711 struct adapter *adapter = pci_get_drvdata(pdev);
2713 if (!t3_reenable_adapter(adapter))
2714 return PCI_ERS_RESULT_RECOVERED;
2716 return PCI_ERS_RESULT_DISCONNECT;
2720 * t3_io_resume - called when traffic can start flowing again.
2721 * @pdev: Pointer to PCI device
2723 * This callback is called when the error recovery driver tells us that
2724 * its OK to resume normal operation.
2726 static void t3_io_resume(struct pci_dev *pdev)
2728 struct adapter *adapter = pci_get_drvdata(pdev);
2730 t3_resume_ports(adapter);
2733 static struct pci_error_handlers t3_err_handler = {
2734 .error_detected = t3_io_error_detected,
2735 .slot_reset = t3_io_slot_reset,
2736 .resume = t3_io_resume,
/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
/*
 * Distribute the available SGE queue sets (SGE_QSETS) across the adapter's
 * ports, scaling by online CPU count when rev > 0 and MSI-X is in use.
 * NOTE(review): several original lines are missing from this extraction
 * (local declarations such as the per-port offset, parts of the
 * conditionals, and the assignments that shrink nqsets); the visible
 * text is NOT the complete function — recover it from the original
 * driver source before modifying.
 */
2744 static void set_nqsets(struct adapter *adap)
2747 int num_cpus = num_online_cpus();
2748 int hwports = adap->params.nports;
2749 int nqsets = SGE_QSETS;
/* Only spread queue sets when the chip revision and MSI-X allow it. */
2751 if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2753 (hwports * nqsets > SGE_QSETS ||
2754 num_cpus >= nqsets / hwports))
/* Cap the per-port queue-set count at the number of online CPUs. */
2756 if (nqsets > num_cpus)
2758 if (nqsets < 1 || hwports == 4)
/* Assign each port its contiguous range of queue sets. */
2763 for_each_port(adap, i) {
2764 struct port_info *pi = adap2pinfo(adap, i);
2767 pi->nqsets = nqsets;
2768 j = pi->first_qset + nqsets;
2770 dev_info(&adap->pdev->dev,
2771 "Port %d using %d queue sets.\n", i, nqsets);
/*
 * Try to enable MSI-X with one vector per queue set plus one extra.
 * On success the allocated vectors are recorded in adap->msix_info[].
 * NOTE(review): the original's branch lines around pci_enable_msix's
 * return value (and the final return) are missing from this extraction;
 * do not assume the visible text is the complete function.
 */
2775 static int __devinit cxgb_enable_msix(struct adapter *adap)
2777 struct msix_entry entries[SGE_QSETS + 1];
/* Request consecutively numbered MSI-X entries. */
2780 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2781 entries[i].entry = i;
2783 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
/* Record the vector assigned to each entry for later request_irq. */
2785 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2786 adap->msix_info[i].vec = entries[i].vector;
/* pci_enable_msix > 0 means fewer vectors are available than asked. */
2788 dev_info(&adap->pdev->dev,
2789 "only %d MSI-X vectors left, not using MSI-X\n", err);
/*
 * Log a one-line summary for each registered port (bus type/width,
 * PHY, interrupt mode) and, once, the adapter's memory sizes and
 * serial number.
 * NOTE(review): the buffer declaration and the if/else selecting
 * between the two snprintf formats (PCIe vs. PCI/PCI-X), plus the
 * loop's continue/printk framing lines, are missing from this
 * extraction; the visible text is not the complete function.
 */
2793 static void __devinit print_port_info(struct adapter *adap,
2794 const struct adapter_info *ai)
2796 static const char *pci_variant[] = {
2797 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
/* PCI Express: report link width only. */
2804 snprintf(buf, sizeof(buf), "%s x%d",
2805 pci_variant[adap->params.pci.variant],
2806 adap->params.pci.width);
/* Parallel PCI/PCI-X: report bus speed and width. */
2808 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2809 pci_variant[adap->params.pci.variant],
2810 adap->params.pci.speed, adap->params.pci.width);
2812 for_each_port(adap, i) {
2813 struct net_device *dev = adap->port[i];
2814 const struct port_info *pi = netdev_priv(dev);
/* Skip ports whose net device failed to register. */
2816 if (!test_bit(i, &adap->registered_device_map))
2818 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2819 dev->name, ai->desc, pi->phy.desc,
2820 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2821 (adap->flags & USING_MSIX) ? " MSI-X" :
2822 (adap->flags & USING_MSI) ? " MSI" : "");
/* Print adapter-wide memory info only for the first (naming) port. */
2823 if (adap->name == dev->name && adap->params.vpd.mclk)
2825 "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2826 adap->name, t3_mc7_size(&adap->cm) >> 20,
2827 t3_mc7_size(&adap->pmtx) >> 20,
2828 t3_mc7_size(&adap->pmrx) >> 20,
2829 adap->params.vpd.sn);
2833 static const struct net_device_ops cxgb_netdev_ops = {
2834 .ndo_open = cxgb_open,
2835 .ndo_stop = cxgb_close,
2836 .ndo_start_xmit = t3_eth_xmit,
2837 .ndo_get_stats = cxgb_get_stats,
2838 .ndo_validate_addr = eth_validate_addr,
2839 .ndo_set_multicast_list = cxgb_set_rxmode,
2840 .ndo_do_ioctl = cxgb_ioctl,
2841 .ndo_change_mtu = cxgb_change_mtu,
2842 .ndo_set_mac_address = cxgb_set_mac_addr,
2843 .ndo_vlan_rx_register = vlan_rx_register,
2844 #ifdef CONFIG_NET_POLL_CONTROLLER
2845 .ndo_poll_controller = cxgb_netpoll,
/*
 * PCI probe routine: allocate and map the adapter, create its per-port
 * net devices, prepare the hardware, register the interfaces, set up
 * offload and interrupts, and print a summary.
 * NOTE(review): this extraction is missing many original lines
 * (closing braces, several error checks, goto labels such as the
 * free-adapter/free-dev paths, and the final returns); the visible
 * text is NOT the complete function — consult the original driver
 * source before modifying the error-unwind ordering.
 */
2849 static int __devinit init_one(struct pci_dev *pdev,
2850 const struct pci_device_id *ent)
2852 static int version_printed;
2854 int i, err, pci_using_dac = 0;
2855 unsigned long mmio_start, mmio_len;
2856 const struct adapter_info *ai;
2857 struct adapter *adapter = NULL;
2858 struct port_info *pi;
/* Print the driver banner only on the first probed device. */
2860 if (!version_printed) {
2861 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
/* Single-threaded workqueue shared by all adapters (see cxgb3_wq users). */
2866 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2868 printk(KERN_ERR DRV_NAME
2869 ": cannot initialize work queue\n");
2874 err = pci_request_regions(pdev, DRV_NAME);
2876 /* Just info, some other driver may have claimed the device. */
2877 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2881 err = pci_enable_device(pdev);
2883 dev_err(&pdev->dev, "cannot enable PCI device\n");
2884 goto out_release_regions;
/* Prefer 64-bit DMA; fall back to 32-bit if unavailable. */
2887 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2889 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2891 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2892 "coherent allocations\n");
2893 goto out_disable_device;
2895 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2896 dev_err(&pdev->dev, "no usable DMA configuration\n");
2897 goto out_disable_device;
2900 pci_set_master(pdev);
/* Save config space so it can be restored after an error/reset. */
2901 pci_save_state(pdev);
2903 mmio_start = pci_resource_start(pdev, 0);
2904 mmio_len = pci_resource_len(pdev, 0);
2905 ai = t3_get_adapter_info(ent->driver_data);
2907 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2910 goto out_disable_device;
2913 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2914 if (!adapter->regs) {
2915 dev_err(&pdev->dev, "cannot map device registers\n");
2917 goto out_free_adapter;
2920 adapter->pdev = pdev;
2921 adapter->name = pci_name(pdev);
2922 adapter->msg_enable = dflt_msg_enable;
2923 adapter->mmio_len = mmio_len;
/* Initialize locks and deferred-work items before any IRQ can fire. */
2925 mutex_init(&adapter->mdio_lock);
2926 spin_lock_init(&adapter->work_lock);
2927 spin_lock_init(&adapter->stats_lock);
2929 INIT_LIST_HEAD(&adapter->adapter_list);
2930 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2931 INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
2932 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
/* Allocate one multiqueue net device per physical port. */
2934 for (i = 0; i < ai->nports; ++i) {
2935 struct net_device *netdev;
2937 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
2943 SET_NETDEV_DEV(netdev, &pdev->dev);
2945 adapter->port[i] = netdev;
2946 pi = netdev_priv(netdev);
2947 pi->adapter = adapter;
2948 pi->rx_csum_offload = 1;
/* Keep the interface quiet until the link is brought up. */
2950 netif_carrier_off(netdev);
2951 netif_tx_stop_all_queues(netdev);
2952 netdev->irq = pdev->irq;
2953 netdev->mem_start = mmio_start;
2954 netdev->mem_end = mmio_start + mmio_len - 1;
2955 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2956 netdev->features |= NETIF_F_LLTX;
2958 netdev->features |= NETIF_F_HIGHDMA;
2960 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2961 netdev->netdev_ops = &cxgb_netdev_ops;
2962 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2965 pci_set_drvdata(pdev, adapter);
2966 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2972 * The card is now ready to go. If any errors occur during device
2973 * registration we do not fail the whole card but rather proceed only
2974 * with the ports we manage to register successfully. However we must
2975 * register at least one net device.
2977 for_each_port(adapter, i) {
2978 err = register_netdev(adapter->port[i]);
2980 dev_warn(&pdev->dev,
2981 "cannot register net device %s, skipping\n",
2982 adapter->port[i]->name);
2985 * Change the name we use for messages to the name of
2986 * the first successfully registered interface.
2988 if (!adapter->registered_device_map)
2989 adapter->name = adapter->port[i]->name;
2991 __set_bit(i, &adapter->registered_device_map);
2994 if (!adapter->registered_device_map) {
2995 dev_err(&pdev->dev, "could not register any net devices\n");
2999 /* Driver's ready. Reflect it on LEDs */
3000 t3_led_ready(adapter);
/* Offload-capable adapters also get a TOE device registered. */
3002 if (is_offload(adapter)) {
3003 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3004 cxgb3_adapter_ofld(adapter);
3007 /* See what interrupts we'll be using */
3008 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3009 adapter->flags |= USING_MSIX;
3010 else if (msi > 0 && pci_enable_msi(pdev) == 0)
3011 adapter->flags |= USING_MSI;
3013 set_nqsets(adapter);
3015 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3018 print_port_info(adapter, ai);
/* Error unwind (labels partially missing from this extraction). */
3022 iounmap(adapter->regs);
3023 for (i = ai->nports - 1; i >= 0; --i)
3024 if (adapter->port[i])
3025 free_netdev(adapter->port[i]);
3031 pci_disable_device(pdev);
3032 out_release_regions:
3033 pci_release_regions(pdev);
3034 pci_set_drvdata(pdev, NULL);
/*
 * PCI remove routine: tear down in reverse order of init_one — stop the
 * SGE, remove sysfs attributes, detach offload, unregister and free the
 * net devices, release interrupt/DMA resources, then unmap and release
 * the PCI device.
 * NOTE(review): braces, the loop index declaration, and the NULL-check
 * framing lines are missing from this extraction; the visible text is
 * not the complete function.
 */
3038 static void __devexit remove_one(struct pci_dev *pdev)
3040 struct adapter *adapter = pci_get_drvdata(pdev);
3045 t3_sge_stop(adapter);
3046 sysfs_remove_group(&adapter->port[0]->dev.kobj,
/* Detach the offload (TOE) side before the net devices go away. */
3049 if (is_offload(adapter)) {
3050 cxgb3_adapter_unofld(adapter);
3051 if (test_bit(OFFLOAD_DEVMAP_BIT,
3052 &adapter->open_device_map))
3053 offload_close(&adapter->tdev);
/* Only unregister ports that registered successfully in init_one. */
3056 for_each_port(adapter, i)
3057 if (test_bit(i, &adapter->registered_device_map))
3058 unregister_netdev(adapter->port[i]);
3060 t3_stop_sge_timers(adapter);
3061 t3_free_sge_resources(adapter);
3062 cxgb_disable_msi(adapter);
3064 for_each_port(adapter, i)
3065 if (adapter->port[i])
3066 free_netdev(adapter->port[i]);
3068 iounmap(adapter->regs);
3070 pci_release_regions(pdev);
3071 pci_disable_device(pdev);
3072 pci_set_drvdata(pdev, NULL);
/*
 * pci_driver descriptor tying the probe/remove/error-recovery entry
 * points to the cxgb3 PCI ID table.
 * NOTE(review): the .name and .probe initializers and the closing
 * brace are missing from this extraction — recover them from the
 * original source (presumably DRV_NAME and init_one; verify).
 */
3076 static struct pci_driver driver = {
3078 .id_table = cxgb3_pci_tbl,
3080 .remove = __devexit_p(remove_one),
3081 .err_handler = &t3_err_handler,
/*
 * Module init: set up the offload layer, then register the PCI driver.
 * NOTE(review): the braces, local declaration, and return statement are
 * missing from this extraction.
 */
3084 static int __init cxgb3_init_module(void)
3088 cxgb3_offload_init();
3090 ret = pci_register_driver(&driver);
/*
 * Module exit: unregister the PCI driver and destroy the shared
 * workqueue.
 * NOTE(review): the guard line checking cxgb3_wq before
 * destroy_workqueue (and the braces) is missing from this extraction.
 */
3094 static void __exit cxgb3_cleanup_module(void)
3096 pci_unregister_driver(&driver);
3098 destroy_workqueue(cxgb3_wq);
/* Register the module entry/exit points with the kernel. */
3101 module_init(cxgb3_init_module);
3102 module_exit(cxgb3_cleanup_module);