/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
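
/*
 * Illustrative sketch only: how the "msi" setting gates the fallback
 * order described above.  The cxgb_enable_msix() helper named here is
 * an assumption for illustration; the driver's real probe-time logic
 * lives elsewhere in this file.
 */
#if 0
static void example_pick_intr_scheme(struct adapter *adap)
{
        if (msi > 1 && cxgb_enable_msix(adap) == 0)     /* msi = 2: try MSI-X */
                adap->flags |= USING_MSIX;
        else if (msi > 0 && pci_enable_msi(adap->pdev) == 0)    /* msi >= 1 */
                adap->flags |= USING_MSI;
        /* otherwise stay on legacy pin interrupts (msi = 0 forces this) */
}
#endif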

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
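
/*
 * Usage example: offload can be disabled at load time with
 *
 *      modprobe cxgb3 ofld_disable=1
 *
 * and, because the parameter is 0644, flipped later through
 * /sys/module/cxgb3/parameters/ofld_disable; the value takes effect the
 * next time an interface is opened.
 */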

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
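
/*
 * Sketch of this queue's intended lifecycle (the create/destroy calls
 * live in the module init/exit paths, outside this excerpt, so the
 * exact form here is an assumption):
 */
#if 0
        cxgb3_wq = create_singlethread_workqueue(DRV_NAME);     /* module init */
        if (!cxgb3_wq)
                return -ENOMEM;

        queue_work(cxgb3_wq, &work);    /* instead of schedule_work(&work) */

        destroy_workqueue(cxgb3_wq);    /* module exit */
#endif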

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else
                netif_carrier_off(dev);

        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter associated with the PHY module change
 *      @port_id: the port index whose PHY module changed
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
        else
                printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
                       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}
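
/*
 * Worked example with hypothetical interface names: on an adapter with
 * two ports of two queue sets each (port 0 owns qsets 0-1, port 1 owns
 * qsets 2-3), the descriptions come out as
 *
 *      msix_info[0].desc = "<adapter name>"    (async events)
 *      msix_info[1].desc = "eth0-0"
 *      msix_info[2].desc = "eth0-1"
 *      msix_info[3].desc = "eth1-2"
 *      msix_info[4].desc = "eth1-3"
 */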

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 5;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

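/*
 * init_tp_parity() primes the TP's parity-protected memories (16 SMT
 * entries, 2048 L2T entries, 2048 routing-table entries, plus a single
 * TCB write) with benign writes so that later parity errors are
 * meaningful.  Whenever a loop has to fall back to the preallocated
 * adap->nofail_skb, it waits for the corresponding replies and then
 * replenishes the spare before it can be reused.
 */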
static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->iff = i;
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                goto alloc_skb_fail;

        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        if (skb == adap->nofail_skb) {
                i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        }

        t3_tp_set_offload_mode(adap, 0);
        return i;

alloc_skb_fail:
        t3_tp_set_offload_mode(adap, 0);
        return -ENOMEM;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
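
/*
 * Worked example: with nq0 = 2 queue sets on port 0 and nq1 = 2 on
 * port 1, the loop above produces
 *
 *      rspq_map[i]                    = i % 2       -> qsets 0, 1 (port 0)
 *      rspq_map[i + RSS_TABLE_SIZE/2] = (i % 2) + 2 -> qsets 2, 3 (port 1)
 *
 * so hashes landing in the lower half of the table are served by
 * port 0's queues and those in the upper half by port 1's.
 */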

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;
        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      set_qset_lro - Turn a queue set's LRO capability on and off
 *      @dev: the device the qset is attached to
 *      @qset_idx: the queue set index
 *      @val: the LRO switch
 *
 *      Sets LRO on or off for a particular queue set.
 *      The device's features flag is updated to reflect the LRO
 *      capability when all queues belonging to the device are
 *      in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->params.sge.qset[qset_idx].lro = !!val;
        adapter->sge.qs[qset_idx].lro_enabled = !!val;
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}
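
/*
 * For reference, CXGB3_SHOW(cam_size, t3_mc5_size(&adap->mc5)) below
 * expands to roughly the following pair of functions:
 */
#if 0
static ssize_t format_cam_size(struct net_device *dev, char *buf)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        return sprintf(buf, "%u\n", t3_mc5_size(&adap->mc5));
}

static ssize_t show_cam_size(struct device *d, struct device_attribute *attr,
                             char *buf)
{
        return attr_show(d, buf, format_cam_size);
}
#endif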

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}
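
/*
 * Unit check for the rate printed above, assuming vpd.cclk is the core
 * clock in kHz: cclk * 1000 is clocks/sec; dividing by cpt (clocks per
 * token) gives tokens/sec; multiplying by bpt (bytes per token) gives
 * bytes/sec; and dividing by 125 converts to Kbps, since 1 Kbps is
 * 125 bytes/sec.
 */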

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                return -ENOMEM;

        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                             GFP_KERNEL);
                if (!adap->nofail_skb)
                        ret = -ENOMEM;
        }

        return ret;
}

static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch(adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
                 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                err = bind_qsets(adap);
                if (err) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", err);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
}

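/*
 * Note the HZ * period / 10 scaling below: linkpoll_period is evidently
 * expressed in tenths of a second, while stats_update_period is in
 * whole seconds.
 */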
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_scheduled_work();

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        dev->real_num_tx_queues = pi->nqsets;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

1299         if (!adapter->open_device_map)
1300                 return 0;
1301
1302         /* Stop link fault interrupts */
1303         t3_xgm_intr_disable(adapter, pi->port_id);
1304         t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
1305
1306         t3_port_intr_disable(adapter, pi->port_id);
1307         netif_tx_stop_all_queues(dev);
1308         pi->phy.ops->power_down(&pi->phy, 1);
1309         netif_carrier_off(dev);
1310         t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1311
1312         spin_lock_irq(&adapter->work_lock);     /* sync with update task */
1313         clear_bit(pi->port_id, &adapter->open_device_map);
1314         spin_unlock_irq(&adapter->work_lock);
1315
1316         if (!(adapter->open_device_map & PORT_MASK))
1317                 cancel_delayed_work_sync(&adapter->adap_check_task);
1318
1319         if (!adapter->open_device_map)
1320                 cxgb_down(adapter);
1321
1322         cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
1323         return 0;
1324 }
1325
1326 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1327 {
1328         struct port_info *pi = netdev_priv(dev);
1329         struct adapter *adapter = pi->adapter;
1330         struct net_device_stats *ns = &pi->netstats;
1331         const struct mac_stats *pstats;
1332
1333         spin_lock(&adapter->stats_lock);
1334         pstats = t3_mac_update_stats(&pi->mac);
1335         spin_unlock(&adapter->stats_lock);
1336
1337         ns->tx_bytes = pstats->tx_octets;
1338         ns->tx_packets = pstats->tx_frames;
1339         ns->rx_bytes = pstats->rx_octets;
1340         ns->rx_packets = pstats->rx_frames;
1341         ns->multicast = pstats->rx_mcast_frames;
1342
1343         ns->tx_errors = pstats->tx_underrun;
1344         ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1345             pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1346             pstats->rx_fifo_ovfl;
1347
1348         /* detailed rx_errors */
1349         ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1350         ns->rx_over_errors = 0;
1351         ns->rx_crc_errors = pstats->rx_fcs_errs;
1352         ns->rx_frame_errors = pstats->rx_symbol_errs;
1353         ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1354         ns->rx_missed_errors = pstats->rx_cong_drops;
1355
1356         /* detailed tx_errors */
1357         ns->tx_aborted_errors = 0;
1358         ns->tx_carrier_errors = 0;
1359         ns->tx_fifo_errors = pstats->tx_underrun;
1360         ns->tx_heartbeat_errors = 0;
1361         ns->tx_window_errors = 0;
1362         return ns;
1363 }
1364
1365 static u32 get_msglevel(struct net_device *dev)
1366 {
1367         struct port_info *pi = netdev_priv(dev);
1368         struct adapter *adapter = pi->adapter;
1369
1370         return adapter->msg_enable;
1371 }
1372
1373 static void set_msglevel(struct net_device *dev, u32 val)
1374 {
1375         struct port_info *pi = netdev_priv(dev);
1376         struct adapter *adapter = pi->adapter;
1377
1378         adapter->msg_enable = val;
1379 }
1380
1381 static char stats_strings[][ETH_GSTRING_LEN] = {
1382         "TxOctetsOK         ",
1383         "TxFramesOK         ",
1384         "TxMulticastFramesOK",
1385         "TxBroadcastFramesOK",
1386         "TxPauseFrames      ",
1387         "TxUnderrun         ",
1388         "TxExtUnderrun      ",
1389
1390         "TxFrames64         ",
1391         "TxFrames65To127    ",
1392         "TxFrames128To255   ",
1393         "TxFrames256To511   ",
1394         "TxFrames512To1023  ",
1395         "TxFrames1024To1518 ",
1396         "TxFrames1519ToMax  ",
1397
1398         "RxOctetsOK         ",
1399         "RxFramesOK         ",
1400         "RxMulticastFramesOK",
1401         "RxBroadcastFramesOK",
1402         "RxPauseFrames      ",
1403         "RxFCSErrors        ",
1404         "RxSymbolErrors     ",
1405         "RxShortErrors      ",
1406         "RxJabberErrors     ",
1407         "RxLengthErrors     ",
1408         "RxFIFOoverflow     ",
1409
1410         "RxFrames64         ",
1411         "RxFrames65To127    ",
1412         "RxFrames128To255   ",
1413         "RxFrames256To511   ",
1414         "RxFrames512To1023  ",
1415         "RxFrames1024To1518 ",
1416         "RxFrames1519ToMax  ",
1417
1418         "PhyFIFOErrors      ",
1419         "TSO                ",
1420         "VLANextractions    ",
1421         "VLANinsertions     ",
1422         "TxCsumOffload      ",
1423         "RxCsumGood         ",
1424         "LroAggregated      ",
1425         "LroFlushed         ",
1426         "LroNoDesc          ",
1427         "RxDrops            ",
1428
1429         "CheckTXEnToggled   ",
1430         "CheckResets        ",
1431
1432         "LinkFaults         ",
1433 };
1434
1435 static int get_sset_count(struct net_device *dev, int sset)
1436 {
1437         switch (sset) {
1438         case ETH_SS_STATS:
1439                 return ARRAY_SIZE(stats_strings);
1440         default:
1441                 return -EOPNOTSUPP;
1442         }
1443 }
1444
1445 #define T3_REGMAP_SIZE (3 * 1024)
1446
1447 static int get_regs_len(struct net_device *dev)
1448 {
1449         return T3_REGMAP_SIZE;
1450 }
1451
1452 static int get_eeprom_len(struct net_device *dev)
1453 {
1454         return EEPROMSIZE;
1455 }
1456
1457 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1458 {
1459         struct port_info *pi = netdev_priv(dev);
1460         struct adapter *adapter = pi->adapter;
1461         u32 fw_vers = 0;
1462         u32 tp_vers = 0;
1463
1464         spin_lock(&adapter->stats_lock);
1465         t3_get_fw_version(adapter, &fw_vers);
1466         t3_get_tp_version(adapter, &tp_vers);
1467         spin_unlock(&adapter->stats_lock);
1468
1469         strcpy(info->driver, DRV_NAME);
1470         strcpy(info->version, DRV_VERSION);
1471         strcpy(info->bus_info, pci_name(adapter->pdev));
1472         if (!fw_vers)
1473                 strcpy(info->fw_version, "N/A");
1474         else {
1475                 snprintf(info->fw_version, sizeof(info->fw_version),
1476                          "%s %u.%u.%u TP %u.%u.%u",
1477                          G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1478                          G_FW_VERSION_MAJOR(fw_vers),
1479                          G_FW_VERSION_MINOR(fw_vers),
1480                          G_FW_VERSION_MICRO(fw_vers),
1481                          G_TP_VERSION_MAJOR(tp_vers),
1482                          G_TP_VERSION_MINOR(tp_vers),
1483                          G_TP_VERSION_MICRO(tp_vers));
1484         }
1485 }
1486
1487 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1488 {
1489         if (stringset == ETH_SS_STATS)
1490                 memcpy(data, stats_strings, sizeof(stats_strings));
1491 }
1492
1493 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1494                                             struct port_info *p, int idx)
1495 {
1496         int i;
1497         unsigned long tot = 0;
1498
1499         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1500                 tot += adapter->sge.qs[i].port_stats[idx];
1501         return tot;
1502 }
1503
1504 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1505                       u64 *data)
1506 {
1507         struct port_info *pi = netdev_priv(dev);
1508         struct adapter *adapter = pi->adapter;
1509         const struct mac_stats *s;
1510
1511         spin_lock(&adapter->stats_lock);
1512         s = t3_mac_update_stats(&pi->mac);
1513         spin_unlock(&adapter->stats_lock);
1514
1515         *data++ = s->tx_octets;
1516         *data++ = s->tx_frames;
1517         *data++ = s->tx_mcast_frames;
1518         *data++ = s->tx_bcast_frames;
1519         *data++ = s->tx_pause;
1520         *data++ = s->tx_underrun;
1521         *data++ = s->tx_fifo_urun;
1522
1523         *data++ = s->tx_frames_64;
1524         *data++ = s->tx_frames_65_127;
1525         *data++ = s->tx_frames_128_255;
1526         *data++ = s->tx_frames_256_511;
1527         *data++ = s->tx_frames_512_1023;
1528         *data++ = s->tx_frames_1024_1518;
1529         *data++ = s->tx_frames_1519_max;
1530
1531         *data++ = s->rx_octets;
1532         *data++ = s->rx_frames;
1533         *data++ = s->rx_mcast_frames;
1534         *data++ = s->rx_bcast_frames;
1535         *data++ = s->rx_pause;
1536         *data++ = s->rx_fcs_errs;
1537         *data++ = s->rx_symbol_errs;
1538         *data++ = s->rx_short;
1539         *data++ = s->rx_jabber;
1540         *data++ = s->rx_too_long;
1541         *data++ = s->rx_fifo_ovfl;
1542
1543         *data++ = s->rx_frames_64;
1544         *data++ = s->rx_frames_65_127;
1545         *data++ = s->rx_frames_128_255;
1546         *data++ = s->rx_frames_256_511;
1547         *data++ = s->rx_frames_512_1023;
1548         *data++ = s->rx_frames_1024_1518;
1549         *data++ = s->rx_frames_1519_max;
1550
1551         *data++ = pi->phy.fifo_errors;
1552
1553         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1554         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1555         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1556         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1557         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1558         *data++ = 0;    /* unused slots; kept so the values written */
1559         *data++ = 0;    /* here stay aligned with the corresponding */
1560         *data++ = 0;    /* entries in stats_strings[]               */
1561         *data++ = s->rx_cong_drops;
1562
1563         *data++ = s->num_toggled;
1564         *data++ = s->num_resets;
1565
1566         *data++ = s->link_faults;
1567 }
1568
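/*
 * Copy the registers in the inclusive range [start, end] into buf in
 * 4-byte steps.  buf is addressed by register offset, so each value lands
 * at its own register address; get_regs() zeroes the buffer beforehand,
 * leaving the skipped ranges as zeros.
 */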
1569 static inline void reg_block_dump(struct adapter *ap, void *buf,
1570                                   unsigned int start, unsigned int end)
1571 {
1572         u32 *p = buf + start;
1573
1574         for (; start <= end; start += sizeof(u32))
1575                 *p++ = t3_read_reg(ap, start);
1576 }
1577
1578 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1579                      void *buf)
1580 {
1581         struct port_info *pi = netdev_priv(dev);
1582         struct adapter *ap = pi->adapter;
1583
1584         /*
1585          * Version scheme:
1586          * bits 0..9: chip version
1587          * bits 10..15: chip revision
1588          * bit 31: set for PCIe cards
1589          */
1590         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1591
1592         /*
1593          * We skip the MAC statistics registers because they are clear-on-read.
1594          * Also reading multi-register stats would need to synchronize with the
1595          * periodic mac stats accumulation.  Hard to justify the complexity.
1596          */
1597         memset(buf, 0, T3_REGMAP_SIZE);
1598         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1599         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1600         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1601         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1602         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1603         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1604                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1605         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1606                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1607 }
1608
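/*
 * Example sketch, not part of the driver: unpacking the regs->version
 * word that get_regs() builds above.  The helper name is made up for
 * illustration; only the bit layout comes from the comment in get_regs().
 */
static inline void example_decode_regs_version(u32 version)
{
        unsigned int chip = version & 0x3ff;            /* bits 0..9   */
        unsigned int rev = (version >> 10) & 0x3f;      /* bits 10..15 */
        unsigned int pcie = (version >> 31) & 1;        /* bit 31      */

        printk(KERN_DEBUG "regmap: chip %u rev %u %s\n",
               chip, rev, pcie ? "PCIe" : "PCI/PCI-X");
}
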
1609 static int restart_autoneg(struct net_device *dev)
1610 {
1611         struct port_info *p = netdev_priv(dev);
1612
1613         if (!netif_running(dev))
1614                 return -EAGAIN;
1615         if (p->link_config.autoneg != AUTONEG_ENABLE)
1616                 return -EINVAL;
1617         p->phy.ops->autoneg_restart(&p->phy);
1618         return 0;
1619 }
1620
1621 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1622 {
1623         struct port_info *pi = netdev_priv(dev);
1624         struct adapter *adapter = pi->adapter;
1625         int i;
1626
1627         if (data == 0)
1628                 data = 2;
1629
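        /* blink for "data" seconds: 2 * data half-second GPIO toggles */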
1630         for (i = 0; i < data * 2; i++) {
1631                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1632                                  (i & 1) ? F_GPIO0_OUT_VAL : 0);
1633                 if (msleep_interruptible(500))
1634                         break;
1635         }
1636         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1637                          F_GPIO0_OUT_VAL);
1638         return 0;
1639 }
1640
1641 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1642 {
1643         struct port_info *p = netdev_priv(dev);
1644
1645         cmd->supported = p->link_config.supported;
1646         cmd->advertising = p->link_config.advertising;
1647
1648         if (netif_carrier_ok(dev)) {
1649                 cmd->speed = p->link_config.speed;
1650                 cmd->duplex = p->link_config.duplex;
1651         } else {
1652                 cmd->speed = -1;
1653                 cmd->duplex = -1;
1654         }
1655
1656         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1657         cmd->phy_address = p->phy.mdio.prtad;
1658         cmd->transceiver = XCVR_EXTERNAL;
1659         cmd->autoneg = p->link_config.autoneg;
1660         cmd->maxtxpkt = 0;
1661         cmd->maxrxpkt = 0;
1662         return 0;
1663 }
1664
1665 static int speed_duplex_to_caps(int speed, int duplex)
1666 {
1667         int cap = 0;
1668
1669         switch (speed) {
1670         case SPEED_10:
1671                 if (duplex == DUPLEX_FULL)
1672                         cap = SUPPORTED_10baseT_Full;
1673                 else
1674                         cap = SUPPORTED_10baseT_Half;
1675                 break;
1676         case SPEED_100:
1677                 if (duplex == DUPLEX_FULL)
1678                         cap = SUPPORTED_100baseT_Full;
1679                 else
1680                         cap = SUPPORTED_100baseT_Half;
1681                 break;
1682         case SPEED_1000:
1683                 if (duplex == DUPLEX_FULL)
1684                         cap = SUPPORTED_1000baseT_Full;
1685                 else
1686                         cap = SUPPORTED_1000baseT_Half;
1687                 break;
1688         case SPEED_10000:
1689                 if (duplex == DUPLEX_FULL)
1690                         cap = SUPPORTED_10000baseT_Full;
1691         }
1692         return cap;
1693 }
1694
1695 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1696                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1697                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1698                       ADVERTISED_10000baseT_Full)
1699
1700 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1701 {
1702         struct port_info *p = netdev_priv(dev);
1703         struct link_config *lc = &p->link_config;
1704
1705         if (!(lc->supported & SUPPORTED_Autoneg)) {
1706                 /*
1707                  * PHY offers a single speed/duplex.  See if that's what's
1708                  * being requested.
1709                  */
1710                 if (cmd->autoneg == AUTONEG_DISABLE) {
1711                         int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1712                         if (lc->supported & cap)
1713                                 return 0;
1714                 }
1715                 return -EINVAL;
1716         }
1717
1718         if (cmd->autoneg == AUTONEG_DISABLE) {
1719                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1720
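                /* 1Gb/s can only be auto-negotiated, never forced */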
1721                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1722                         return -EINVAL;
1723                 lc->requested_speed = cmd->speed;
1724                 lc->requested_duplex = cmd->duplex;
1725                 lc->advertising = 0;
1726         } else {
1727                 cmd->advertising &= ADVERTISED_MASK;
1728                 cmd->advertising &= lc->supported;
1729                 if (!cmd->advertising)
1730                         return -EINVAL;
1731                 lc->requested_speed = SPEED_INVALID;
1732                 lc->requested_duplex = DUPLEX_INVALID;
1733                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1734         }
1735         lc->autoneg = cmd->autoneg;
1736         if (netif_running(dev))
1737                 t3_link_start(&p->phy, &p->mac, lc);
1738         return 0;
1739 }
1740
1741 static void get_pauseparam(struct net_device *dev,
1742                            struct ethtool_pauseparam *epause)
1743 {
1744         struct port_info *p = netdev_priv(dev);
1745
1746         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1747         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1748         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1749 }
1750
1751 static int set_pauseparam(struct net_device *dev,
1752                           struct ethtool_pauseparam *epause)
1753 {
1754         struct port_info *p = netdev_priv(dev);
1755         struct link_config *lc = &p->link_config;
1756
1757         if (epause->autoneg == AUTONEG_DISABLE)
1758                 lc->requested_fc = 0;
1759         else if (lc->supported & SUPPORTED_Autoneg)
1760                 lc->requested_fc = PAUSE_AUTONEG;
1761         else
1762                 return -EINVAL;
1763
1764         if (epause->rx_pause)
1765                 lc->requested_fc |= PAUSE_RX;
1766         if (epause->tx_pause)
1767                 lc->requested_fc |= PAUSE_TX;
1768         if (lc->autoneg == AUTONEG_ENABLE) {
1769                 if (netif_running(dev))
1770                         t3_link_start(&p->phy, &p->mac, lc);
1771         } else {
1772                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1773                 if (netif_running(dev))
1774                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1775         }
1776         return 0;
1777 }
1778
1779 static u32 get_rx_csum(struct net_device *dev)
1780 {
1781         struct port_info *p = netdev_priv(dev);
1782
1783         return p->rx_offload & T3_RX_CSUM;
1784 }
1785
1786 static int set_rx_csum(struct net_device *dev, u32 data)
1787 {
1788         struct port_info *p = netdev_priv(dev);
1789
1790         if (data) {
1791                 p->rx_offload |= T3_RX_CSUM;
1792         } else {
1793                 int i;
1794
1795                 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1796                 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1797                         set_qset_lro(dev, i, 0);
1798         }
1799         return 0;
1800 }
1801
1802 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1803 {
1804         struct port_info *pi = netdev_priv(dev);
1805         struct adapter *adapter = pi->adapter;
1806         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1807
1808         e->rx_max_pending = MAX_RX_BUFFERS;
1809         e->rx_mini_max_pending = 0;
1810         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1811         e->tx_max_pending = MAX_TXQ_ENTRIES;
1812
1813         e->rx_pending = q->fl_size;
1814         e->rx_mini_pending = q->rspq_size;
1815         e->rx_jumbo_pending = q->jumbo_size;
1816         e->tx_pending = q->txq_size[0];
1817 }
1818
1819 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1820 {
1821         struct port_info *pi = netdev_priv(dev);
1822         struct adapter *adapter = pi->adapter;
1823         struct qset_params *q;
1824         int i;
1825
1826         if (e->rx_pending > MAX_RX_BUFFERS ||
1827             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1828             e->tx_pending > MAX_TXQ_ENTRIES ||
1829             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1830             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1831             e->rx_pending < MIN_FL_ENTRIES ||
1832             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1833             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1834                 return -EINVAL;
1835
1836         if (adapter->flags & FULL_INIT_DONE)
1837                 return -EBUSY;
1838
1839         q = &adapter->params.sge.qset[pi->first_qset];
1840         for (i = 0; i < pi->nqsets; ++i, ++q) {
1841                 q->rspq_size = e->rx_mini_pending;
1842                 q->fl_size = e->rx_pending;
1843                 q->jumbo_size = e->rx_jumbo_pending;
1844                 q->txq_size[0] = e->tx_pending;
1845                 q->txq_size[1] = e->tx_pending;
1846                 q->txq_size[2] = e->tx_pending;
1847         }
1848         return 0;
1849 }
1850
1851 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1852 {
1853         struct port_info *pi = netdev_priv(dev);
1854         struct adapter *adapter = pi->adapter;
1855         struct qset_params *qsp = &adapter->params.sge.qset[0];
1856         struct sge_qset *qs = &adapter->sge.qs[0];
1857
1858         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1859                 return -EINVAL;
1860
1861         qsp->coalesce_usecs = c->rx_coalesce_usecs;
1862         t3_update_qset_coalesce(qs, qsp);
1863         return 0;
1864 }
1865
1866 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1867 {
1868         struct port_info *pi = netdev_priv(dev);
1869         struct adapter *adapter = pi->adapter;
1870         struct qset_params *q = adapter->params.sge.qset;
1871
1872         c->rx_coalesce_usecs = q->coalesce_usecs;
1873         return 0;
1874 }
1875
1876 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1877                       u8 *data)
1878 {
1879         struct port_info *pi = netdev_priv(dev);
1880         struct adapter *adapter = pi->adapter;
1881         int i, err = 0;
1882
1883         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1884         if (!buf)
1885                 return -ENOMEM;
1886
1887         e->magic = EEPROM_MAGIC;
1888         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1889                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
1890
1891         if (!err)
1892                 memcpy(data, buf + e->offset, e->len);
1893         kfree(buf);
1894         return err;
1895 }
1896
1897 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1898                       u8 *data)
1899 {
1900         struct port_info *pi = netdev_priv(dev);
1901         struct adapter *adapter = pi->adapter;
1902         u32 aligned_offset, aligned_len;
1903         __le32 *p;
1904         u8 *buf;
1905         int err;
1906
1907         if (eeprom->magic != EEPROM_MAGIC)
1908                 return -EINVAL;
1909
1910         aligned_offset = eeprom->offset & ~3;
1911         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1912
1913         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1914                 buf = kmalloc(aligned_len, GFP_KERNEL);
1915                 if (!buf)
1916                         return -ENOMEM;
1917                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1918                 if (!err && aligned_len > 4)
1919                         err = t3_seeprom_read(adapter,
1920                                               aligned_offset + aligned_len - 4,
1921                                               (__le32 *)&buf[aligned_len - 4]);
1922                 if (err)
1923                         goto out;
1924                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1925         } else
1926                 buf = data;
1927
1928         err = t3_seeprom_wp(adapter, 0);
1929         if (err)
1930                 goto out;
1931
1932         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1933                 err = t3_seeprom_write(adapter, aligned_offset, *p);
1934                 aligned_offset += 4;
1935         }
1936
1937         if (!err)
1938                 err = t3_seeprom_wp(adapter, 1);
1939 out:
1940         if (buf != data)
1941                 kfree(buf);
1942         return err;
1943 }
1944
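/*
 * Worked example for the alignment logic in set_eeprom() above: writing
 * len = 3 bytes at offset = 6 gives aligned_offset = 6 & ~3 = 4 and
 * aligned_len = (3 + 2 + 3) & ~3 = 8, i.e. the two 4-byte words covering
 * bytes [4, 12).  Both words are read back first so that the bytes outside
 * the caller's [6, 9) range are rewritten with their original values.
 */
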
1945 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1946 {
1947         wol->supported = 0;
1948         wol->wolopts = 0;
1949         memset(&wol->sopass, 0, sizeof(wol->sopass));
1950 }
1951
1952 static const struct ethtool_ops cxgb_ethtool_ops = {
1953         .get_settings = get_settings,
1954         .set_settings = set_settings,
1955         .get_drvinfo = get_drvinfo,
1956         .get_msglevel = get_msglevel,
1957         .set_msglevel = set_msglevel,
1958         .get_ringparam = get_sge_param,
1959         .set_ringparam = set_sge_param,
1960         .get_coalesce = get_coalesce,
1961         .set_coalesce = set_coalesce,
1962         .get_eeprom_len = get_eeprom_len,
1963         .get_eeprom = get_eeprom,
1964         .set_eeprom = set_eeprom,
1965         .get_pauseparam = get_pauseparam,
1966         .set_pauseparam = set_pauseparam,
1967         .get_rx_csum = get_rx_csum,
1968         .set_rx_csum = set_rx_csum,
1969         .set_tx_csum = ethtool_op_set_tx_csum,
1970         .set_sg = ethtool_op_set_sg,
1971         .get_link = ethtool_op_get_link,
1972         .get_strings = get_strings,
1973         .phys_id = cxgb3_phys_id,
1974         .nway_reset = restart_autoneg,
1975         .get_sset_count = get_sset_count,
1976         .get_ethtool_stats = get_stats,
1977         .get_regs_len = get_regs_len,
1978         .get_regs = get_regs,
1979         .get_wol = get_wol,
1980         .set_tso = ethtool_op_set_tso,
1981 };
1982
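/*
 * Range check for optional fields of the extension ioctls below: a
 * negative value means "not supplied / leave unchanged" and always
 * passes the check.
 */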
1983 static int in_range(int val, int lo, int hi)
1984 {
1985         return val < 0 || (val <= hi && val >= lo);
1986 }
1987
1988 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1989 {
1990         struct port_info *pi = netdev_priv(dev);
1991         struct adapter *adapter = pi->adapter;
1992         u32 cmd;
1993         int ret;
1994
1995         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1996                 return -EFAULT;
1997
1998         switch (cmd) {
1999         case CHELSIO_SET_QSET_PARAMS:{
2000                 int i;
2001                 struct qset_params *q;
2002                 struct ch_qset_params t;
2003                 int q1 = pi->first_qset;
2004                 int nqsets = pi->nqsets;
2005
2006                 if (!capable(CAP_NET_ADMIN))
2007                         return -EPERM;
2008                 if (copy_from_user(&t, useraddr, sizeof(t)))
2009                         return -EFAULT;
2010                 if (t.qset_idx >= SGE_QSETS)
2011                         return -EINVAL;
2012                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2013                         !in_range(t.cong_thres, 0, 255) ||
2014                         !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2015                                 MAX_TXQ_ENTRIES) ||
2016                         !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2017                                 MAX_TXQ_ENTRIES) ||
2018                         !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2019                                 MAX_CTRL_TXQ_ENTRIES) ||
2020                         !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2021                                 MAX_RX_BUFFERS) ||
2022                         !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2023                                 MAX_RX_JUMBO_BUFFERS) ||
2024                         !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2025                                 MAX_RSPQ_ENTRIES))
2026                         return -EINVAL;
2027
2028                 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
2029                         for_each_port(adapter, i) {
2030                                 pi = adap2pinfo(adapter, i);
2031                                 if (t.qset_idx >= pi->first_qset &&
2032                                     t.qset_idx < pi->first_qset + pi->nqsets &&
2033                                     !(pi->rx_offload & T3_RX_CSUM))
2034                                         return -EINVAL;
2035                         }
2036
2037                 if ((adapter->flags & FULL_INIT_DONE) &&
2038                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2039                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2040                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2041                         t.polling >= 0 || t.cong_thres >= 0))
2042                         return -EBUSY;
2043
2044                 /* Allow setting of any available qset when offload enabled */
2045                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2046                         q1 = 0;
2047                         for_each_port(adapter, i) {
2048                                 pi = adap2pinfo(adapter, i);
2049                                 nqsets += pi->first_qset + pi->nqsets;
2050                         }
2051                 }
2052
2053                 if (t.qset_idx < q1)
2054                         return -EINVAL;
2055                 if (t.qset_idx > q1 + nqsets - 1)
2056                         return -EINVAL;
2057
2058                 q = &adapter->params.sge.qset[t.qset_idx];
2059
2060                 if (t.rspq_size >= 0)
2061                         q->rspq_size = t.rspq_size;
2062                 if (t.fl_size[0] >= 0)
2063                         q->fl_size = t.fl_size[0];
2064                 if (t.fl_size[1] >= 0)
2065                         q->jumbo_size = t.fl_size[1];
2066                 if (t.txq_size[0] >= 0)
2067                         q->txq_size[0] = t.txq_size[0];
2068                 if (t.txq_size[1] >= 0)
2069                         q->txq_size[1] = t.txq_size[1];
2070                 if (t.txq_size[2] >= 0)
2071                         q->txq_size[2] = t.txq_size[2];
2072                 if (t.cong_thres >= 0)
2073                         q->cong_thres = t.cong_thres;
2074                 if (t.intr_lat >= 0) {
2075                         struct sge_qset *qs =
2076                                 &adapter->sge.qs[t.qset_idx];
2077
2078                         q->coalesce_usecs = t.intr_lat;
2079                         t3_update_qset_coalesce(qs, q);
2080                 }
2081                 if (t.polling >= 0) {
2082                         if (adapter->flags & USING_MSIX)
2083                                 q->polling = t.polling;
2084                         else {
2085                                 /* No polling with INTx for T3A */
2086                                 if (adapter->params.rev == 0 &&
2087                                         !(adapter->flags & USING_MSI))
2088                                         t.polling = 0;
2089
2090                                 for (i = 0; i < SGE_QSETS; i++) {
2091                                         q = &adapter->params.sge.qset[i];
2093                                         q->polling = t.polling;
2094                                 }
2095                         }
2096                 }
2097                 if (t.lro >= 0)
2098                         set_qset_lro(dev, t.qset_idx, t.lro);
2099
2100                 break;
2101         }
2102         case CHELSIO_GET_QSET_PARAMS:{
2103                 struct qset_params *q;
2104                 struct ch_qset_params t;
2105                 int q1 = pi->first_qset;
2106                 int nqsets = pi->nqsets;
2107                 int i;
2108
2109                 if (copy_from_user(&t, useraddr, sizeof(t)))
2110                         return -EFAULT;
2111
2112                 /* Display qsets for all ports when offload enabled */
2113                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2114                         q1 = 0;
2115                         for_each_port(adapter, i) {
2116                                 pi = adap2pinfo(adapter, i);
2117                                 nqsets = pi->first_qset + pi->nqsets;
2118                         }
2119                 }
2120
2121                 if (t.qset_idx >= nqsets)
2122                         return -EINVAL;
2123
2124                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2125                 t.rspq_size = q->rspq_size;
2126                 t.txq_size[0] = q->txq_size[0];
2127                 t.txq_size[1] = q->txq_size[1];
2128                 t.txq_size[2] = q->txq_size[2];
2129                 t.fl_size[0] = q->fl_size;
2130                 t.fl_size[1] = q->jumbo_size;
2131                 t.polling = q->polling;
2132                 t.lro = q->lro;
2133                 t.intr_lat = q->coalesce_usecs;
2134                 t.cong_thres = q->cong_thres;
2135                 t.qnum = q1;
2136
2137                 if (adapter->flags & USING_MSIX)
2138                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2139                 else
2140                         t.vector = adapter->pdev->irq;
2141
2142                 if (copy_to_user(useraddr, &t, sizeof(t)))
2143                         return -EFAULT;
2144                 break;
2145         }
2146         case CHELSIO_SET_QSET_NUM:{
2147                 struct ch_reg edata;
2148                 unsigned int i, first_qset = 0, other_qsets = 0;
2149
2150                 if (!capable(CAP_NET_ADMIN))
2151                         return -EPERM;
2152                 if (adapter->flags & FULL_INIT_DONE)
2153                         return -EBUSY;
2154                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2155                         return -EFAULT;
2156                 if (edata.val < 1 ||
2157                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2158                         return -EINVAL;
2159
2160                 for_each_port(adapter, i)
2161                         if (adapter->port[i] && adapter->port[i] != dev)
2162                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2163
2164                 if (edata.val + other_qsets > SGE_QSETS)
2165                         return -EINVAL;
2166
2167                 pi->nqsets = edata.val;
2168
2169                 for_each_port(adapter, i)
2170                         if (adapter->port[i]) {
2171                                 pi = adap2pinfo(adapter, i);
2172                                 pi->first_qset = first_qset;
2173                                 first_qset += pi->nqsets;
2174                         }
2175                 break;
2176         }
2177         case CHELSIO_GET_QSET_NUM:{
2178                 struct ch_reg edata;
2179
2180                 edata.cmd = CHELSIO_GET_QSET_NUM;
2181                 edata.val = pi->nqsets;
2182                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2183                         return -EFAULT;
2184                 break;
2185         }
2186         case CHELSIO_LOAD_FW:{
2187                 u8 *fw_data;
2188                 struct ch_mem_range t;
2189
2190                 if (!capable(CAP_SYS_RAWIO))
2191                         return -EPERM;
2192                 if (copy_from_user(&t, useraddr, sizeof(t)))
2193                         return -EFAULT;
2194                 /* Check t.len sanity ? */
2195                 fw_data = kmalloc(t.len, GFP_KERNEL);
2196                 if (!fw_data)
2197                         return -ENOMEM;
2198
2199                 if (copy_from_user(fw_data, useraddr + sizeof(t),
2200                                    t.len)) {
2201                         kfree(fw_data);
2202                         return -EFAULT;
2203                 }
2204
2205                 ret = t3_load_fw(adapter, fw_data, t.len);
2206                 kfree(fw_data);
2207                 if (ret)
2208                         return ret;
2209                 break;
2210         }
2211         case CHELSIO_SETMTUTAB:{
2212                 struct ch_mtus m;
2213                 int i;
2214
2215                 if (!is_offload(adapter))
2216                         return -EOPNOTSUPP;
2217                 if (!capable(CAP_NET_ADMIN))
2218                         return -EPERM;
2219                 if (offload_running(adapter))
2220                         return -EBUSY;
2221                 if (copy_from_user(&m, useraddr, sizeof(m)))
2222                         return -EFAULT;
2223                 if (m.nmtus != NMTUS)
2224                         return -EINVAL;
2225                 if (m.mtus[0] < 81)     /* accommodate SACK */
2226                         return -EINVAL;
2227
2228                 /* MTUs must be in ascending order */
2229                 for (i = 1; i < NMTUS; ++i)
2230                         if (m.mtus[i] < m.mtus[i - 1])
2231                                 return -EINVAL;
2232
2233                 memcpy(adapter->params.mtus, m.mtus,
2234                         sizeof(adapter->params.mtus));
2235                 break;
2236         }
2237         case CHELSIO_GET_PM:{
2238                 struct tp_params *p = &adapter->params.tp;
2239                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2240
2241                 if (!is_offload(adapter))
2242                         return -EOPNOTSUPP;
2243                 m.tx_pg_sz = p->tx_pg_size;
2244                 m.tx_num_pg = p->tx_num_pgs;
2245                 m.rx_pg_sz = p->rx_pg_size;
2246                 m.rx_num_pg = p->rx_num_pgs;
2247                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2248                 if (copy_to_user(useraddr, &m, sizeof(m)))
2249                         return -EFAULT;
2250                 break;
2251         }
2252         case CHELSIO_SET_PM:{
2253                 struct ch_pm m;
2254                 struct tp_params *p = &adapter->params.tp;
2255
2256                 if (!is_offload(adapter))
2257                         return -EOPNOTSUPP;
2258                 if (!capable(CAP_NET_ADMIN))
2259                         return -EPERM;
2260                 if (adapter->flags & FULL_INIT_DONE)
2261                         return -EBUSY;
2262                 if (copy_from_user(&m, useraddr, sizeof(m)))
2263                         return -EFAULT;
2264                 if (!is_power_of_2(m.rx_pg_sz) ||
2265                         !is_power_of_2(m.tx_pg_sz))
2266                         return -EINVAL; /* not power of 2 */
2267                 if (!(m.rx_pg_sz & 0x14000))
2268                         return -EINVAL; /* not 16KB or 64KB */
2269                 if (!(m.tx_pg_sz & 0x1554000))
2270                         return -EINVAL; /* not a power of 4 in 16KB-16MB */
2271                 if (m.tx_num_pg == -1)
2272                         m.tx_num_pg = p->tx_num_pgs;
2273                 if (m.rx_num_pg == -1)
2274                         m.rx_num_pg = p->rx_num_pgs;
2275                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2276                         return -EINVAL;
2277                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2278                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2279                         return -EINVAL;
2280                 p->rx_pg_size = m.rx_pg_sz;
2281                 p->tx_pg_size = m.tx_pg_sz;
2282                 p->rx_num_pgs = m.rx_num_pg;
2283                 p->tx_num_pgs = m.tx_num_pg;
2284                 break;
2285         }
2286         case CHELSIO_GET_MEM:{
2287                 struct ch_mem_range t;
2288                 struct mc7 *mem;
2289                 u64 buf[32];
2290
2291                 if (!is_offload(adapter))
2292                         return -EOPNOTSUPP;
2293                 if (!(adapter->flags & FULL_INIT_DONE))
2294                         return -EIO;    /* need the memory controllers */
2295                 if (copy_from_user(&t, useraddr, sizeof(t)))
2296                         return -EFAULT;
2297                 if ((t.addr & 7) || (t.len & 7))
2298                         return -EINVAL;
2299                 if (t.mem_id == MEM_CM)
2300                         mem = &adapter->cm;
2301                 else if (t.mem_id == MEM_PMRX)
2302                         mem = &adapter->pmrx;
2303                 else if (t.mem_id == MEM_PMTX)
2304                         mem = &adapter->pmtx;
2305                 else
2306                         return -EINVAL;
2307
2308                 /*
2309                  * Version scheme:
2310                  * bits 0..9: chip version
2311                  * bits 10..15: chip revision
2312                  */
2313                 t.version = 3 | (adapter->params.rev << 10);
2314                 if (copy_to_user(useraddr, &t, sizeof(t)))
2315                         return -EFAULT;
2316
2317                 /*
2318                  * Read 256 bytes at a time as len can be large and we don't
2319                  * want to use huge intermediate buffers.
2320                  */
2321                 useraddr += sizeof(t);  /* advance to start of buffer */
2322                 while (t.len) {
2323                         unsigned int chunk =
2324                                 min_t(unsigned int, t.len, sizeof(buf));
2325
2326                         ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
2327                                              buf);
2329                         if (ret)
2330                                 return ret;
2331                         if (copy_to_user(useraddr, buf, chunk))
2332                                 return -EFAULT;
2333                         useraddr += chunk;
2334                         t.addr += chunk;
2335                         t.len -= chunk;
2336                 }
2337                 break;
2338         }
2339         case CHELSIO_SET_TRACE_FILTER:{
2340                 struct ch_trace t;
2341                 const struct trace_params *tp;
2342
2343                 if (!capable(CAP_NET_ADMIN))
2344                         return -EPERM;
2345                 if (!offload_running(adapter))
2346                         return -EAGAIN;
2347                 if (copy_from_user(&t, useraddr, sizeof(t)))
2348                         return -EFAULT;
2349
2350                 tp = (const struct trace_params *)&t.sip;
2351                 if (t.config_tx)
2352                         t3_config_trace_filter(adapter, tp, 0,
2353                                                 t.invert_match,
2354                                                 t.trace_tx);
2355                 if (t.config_rx)
2356                         t3_config_trace_filter(adapter, tp, 1,
2357                                                 t.invert_match,
2358                                                 t.trace_rx);
2359                 break;
2360         }
2361         default:
2362                 return -EOPNOTSUPP;
2363         }
2364         return 0;
2365 }
2366
2367 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2368 {
2369         struct mii_ioctl_data *data = if_mii(req);
2370         struct port_info *pi = netdev_priv(dev);
2371         struct adapter *adapter = pi->adapter;
2372
2373         switch (cmd) {
2374         case SIOCGMIIREG:
2375         case SIOCSMIIREG:
2376                 /* Convert phy_id from older PRTAD/DEVAD format */
2377                 if (is_10G(adapter) &&
2378                     !mdio_phy_id_is_c45(data->phy_id) &&
2379                     (data->phy_id & 0x1f00) &&
2380                     !(data->phy_id & 0xe0e0))
2381                         data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2382                                                        data->phy_id & 0x1f);
2383                 /* FALLTHRU */
2384         case SIOCGMIIPHY:
2385                 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2386         case SIOCCHIOCTL:
2387                 return cxgb_extension_ioctl(dev, req->ifr_data);
2388         default:
2389                 return -EOPNOTSUPP;
2390         }
2391 }
2392
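/*
 * Worked example for the phy_id conversion in cxgb_ioctl() above: an older
 * tool may pass a packed id such as 0x0105, i.e. PRTAD 1 in bits 8..12 and
 * DEVAD 5 in bits 0..4.  Bits 5..7 and 13..15 are clear, so the test
 * matches and the id is rewritten to mdio_phy_id_c45(1, 5) before being
 * handed to mdio_mii_ioctl().
 */
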
2393 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2394 {
2395         struct port_info *pi = netdev_priv(dev);
2396         struct adapter *adapter = pi->adapter;
2397         int ret;
2398
2399         if (new_mtu < 81)       /* accommodate SACK */
2400                 return -EINVAL;
2401         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2402                 return ret;
2403         dev->mtu = new_mtu;
2404         init_port_mtus(adapter);
2405         if (adapter->params.rev == 0 && offload_running(adapter))
2406                 t3_load_mtus(adapter, adapter->params.mtus,
2407                              adapter->params.a_wnd, adapter->params.b_wnd,
2408                              adapter->port[0]->mtu);
2409         return 0;
2410 }
2411
2412 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2413 {
2414         struct port_info *pi = netdev_priv(dev);
2415         struct adapter *adapter = pi->adapter;
2416         struct sockaddr *addr = p;
2417
2418         if (!is_valid_ether_addr(addr->sa_data))
2419                 return -EINVAL;
2420
2421         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2422         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2423         if (offload_running(adapter))
2424                 write_smt_entry(adapter, pi->port_id);
2425         return 0;
2426 }
2427
2428 /**
2429  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2430  * @adap: the adapter
2431  * @p: the port
2432  *
2433  * Ensures that current Rx processing on any of the queues associated with
2434  * the given port completes before returning.  We do this by acquiring and
2435  * releasing each response queue's lock, which a handler holds while it runs.
2436  */
2437 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2438 {
2439         int i;
2440
2441         for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2442                 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2443
2444                 spin_lock_irq(&q->lock);
2445                 spin_unlock_irq(&q->lock);
2446         }
2447 }
2448
2449 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2450 {
2451         struct port_info *pi = netdev_priv(dev);
2452         struct adapter *adapter = pi->adapter;
2453
2454         pi->vlan_grp = grp;
2455         if (adapter->params.rev > 0)
2456                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2457         else {
2458                 /* single control for all ports */
2459                 unsigned int i, have_vlans = 0;
2460                 for_each_port(adapter, i)
2461                     have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2462
2463                 t3_set_vlan_accel(adapter, 1, have_vlans);
2464         }
2465         t3_synchronize_rx(adapter, pi);
2466 }
2467
2468 #ifdef CONFIG_NET_POLL_CONTROLLER
2469 static void cxgb_netpoll(struct net_device *dev)
2470 {
2471         struct port_info *pi = netdev_priv(dev);
2472         struct adapter *adapter = pi->adapter;
2473         int qidx;
2474
2475         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2476                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2477                 void *source;
2478
2479                 if (adapter->flags & USING_MSIX)
2480                         source = qs;
2481                 else
2482                         source = adapter;
2483
2484                 t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2485         }
2486 }
2487 #endif
2488
2489 /*
2490  * Periodic accumulation of MAC statistics.
2491  */
2492 static void mac_stats_update(struct adapter *adapter)
2493 {
2494         int i;
2495
2496         for_each_port(adapter, i) {
2497                 struct net_device *dev = adapter->port[i];
2498                 struct port_info *p = netdev_priv(dev);
2499
2500                 if (netif_running(dev)) {
2501                         spin_lock(&adapter->stats_lock);
2502                         t3_mac_update_stats(&p->mac);
2503                         spin_unlock(&adapter->stats_lock);
2504                 }
2505         }
2506 }
2507
2508 static void check_link_status(struct adapter *adapter)
2509 {
2510         int i;
2511
2512         for_each_port(adapter, i) {
2513                 struct net_device *dev = adapter->port[i];
2514                 struct port_info *p = netdev_priv(dev);
2515                 int link_fault;
2516
2517                 spin_lock_irq(&adapter->work_lock);
2518                 link_fault = p->link_fault;
2519                 spin_unlock_irq(&adapter->work_lock);
2520
2521                 if (link_fault) {
2522                         t3_link_fault(adapter, i);
2523                         continue;
2524                 }
2525
2526                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2527                         t3_xgm_intr_disable(adapter, i);
2528                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2529
2530                         t3_link_changed(adapter, i);
2531                         t3_xgm_intr_enable(adapter, i);
2532                 }
2533         }
2534 }
2535
2536 static void check_t3b2_mac(struct adapter *adapter)
2537 {
2538         int i;
2539
2540         if (!rtnl_trylock())    /* synchronize with ifdown */
2541                 return;
2542
2543         for_each_port(adapter, i) {
2544                 struct net_device *dev = adapter->port[i];
2545                 struct port_info *p = netdev_priv(dev);
2546                 int status;
2547
2548                 if (!netif_running(dev))
2549                         continue;
2550
2551                 status = 0;
2552                 if (netif_carrier_ok(dev))
2553                         status = t3b2_mac_watchdog_task(&p->mac);
2554                 if (status == 1)
2555                         p->mac.stats.num_toggled++;
2556                 else if (status == 2) {
2557                         struct cmac *mac = &p->mac;
2558
2559                         t3_mac_set_mtu(mac, dev->mtu);
2560                         t3_mac_set_address(mac, 0, dev->dev_addr);
2561                         cxgb_set_rxmode(dev);
2562                         t3_link_start(&p->phy, mac, &p->link_config);
2563                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2564                         t3_port_intr_enable(adapter, p->port_id);
2565                         p->mac.stats.num_resets++;
2566                 }
2567         }
2568         rtnl_unlock();
2569 }
2570
2571
2572 static void t3_adap_check_task(struct work_struct *work)
2573 {
2574         struct adapter *adapter = container_of(work, struct adapter,
2575                                                adap_check_task.work);
2576         const struct adapter_params *p = &adapter->params;
2577         int port;
2578         unsigned int v, status, reset;
2579
2580         adapter->check_task_cnt++;
2581
2582         check_link_status(adapter);
2583
2584         /* Accumulate MAC stats if needed */
2585         if (!p->linkpoll_period ||
2586             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2587             p->stats_update_period) {
2588                 mac_stats_update(adapter);
2589                 adapter->check_task_cnt = 0;
2590         }
2591
2592         if (p->rev == T3_REV_B2)
2593                 check_t3b2_mac(adapter);
2594
2595         /*
2596          * Scan the XGMACs for various conditions that we want to monitor
2597          * by periodic polling rather than via an interrupt.  This is used
2598          * for conditions that would otherwise flood the system with
2599          * interrupts, where we only really need to know that they are
2600          * "happening" ...  For each condition we count its detection and
2601          * reset it for the next polling loop.
2602          */
2603         for_each_port(adapter, port) {
2604                 struct cmac *mac = &adap2pinfo(adapter, port)->mac;
2605                 u32 cause;
2606
2607                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2608                 reset = 0;
2609                 if (cause & F_RXFIFO_OVERFLOW) {
2610                         mac->stats.rx_fifo_ovfl++;
2611                         reset |= F_RXFIFO_OVERFLOW;
2612                 }
2613
2614                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2615         }
2616
2617         /*
2618          * We do the same as above for FL_EMPTY interrupts.
2619          */
2620         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2621         reset = 0;
2622
2623         if (status & F_FLEMPTY) {
2624                 struct sge_qset *qs = &adapter->sge.qs[0];
2625                 int i = 0;
2626
2627                 reset |= F_FLEMPTY;
2628
2629                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2630                     0xffff;
2631
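                /*
                 * One bit per free list, two consecutive bits (FL0, FL1)
                 * per qset: step to the next qset after every second bit.
                 */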
2632                 while (v) {
2633                         qs->fl[i].empty += (v & 1);
2634                         if (i)
2635                                 qs++;
2636                         i ^= 1;
2637                         v >>= 1;
2638                 }
2639         }
2640
2641         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2642
2643         /* Schedule the next check update if any port is active. */
2644         spin_lock_irq(&adapter->work_lock);
2645         if (adapter->open_device_map & PORT_MASK)
2646                 schedule_chk_task(adapter);
2647         spin_unlock_irq(&adapter->work_lock);
2648 }
2649
2650 /*
2651  * Processes external (PHY) interrupts in process context.
2652  */
2653 static void ext_intr_task(struct work_struct *work)
2654 {
2655         struct adapter *adapter = container_of(work, struct adapter,
2656                                                ext_intr_handler_task);
2657         int i;
2658
2659         /* Disable link fault interrupts */
2660         for_each_port(adapter, i) {
2661                 struct net_device *dev = adapter->port[i];
2662                 struct port_info *p = netdev_priv(dev);
2663
2664                 t3_xgm_intr_disable(adapter, i);
2665                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2666         }
2667
2668         /* Re-enable link fault interrupts */
2669         t3_phy_intr_handler(adapter);
2670
2671         for_each_port(adapter, i)
2672                 t3_xgm_intr_enable(adapter, i);
2673
2674         /* Now reenable external interrupts */
2675         spin_lock_irq(&adapter->work_lock);
2676         if (adapter->slow_intr_mask) {
2677                 adapter->slow_intr_mask |= F_T3DBG;
2678                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2679                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2680                              adapter->slow_intr_mask);
2681         }
2682         spin_unlock_irq(&adapter->work_lock);
2683 }
2684
2685 /*
2686  * Interrupt-context handler for external (PHY) interrupts.
2687  */
2688 void t3_os_ext_intr_handler(struct adapter *adapter)
2689 {
2690         /*
2691          * Schedule a task to handle external interrupts as they may be slow
2692          * and we use a mutex to protect MDIO registers.  We disable PHY
2693          * interrupts in the meantime and let the task reenable them when
2694          * it's done.
2695          */
2696         spin_lock(&adapter->work_lock);
2697         if (adapter->slow_intr_mask) {
2698                 adapter->slow_intr_mask &= ~F_T3DBG;
2699                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2700                              adapter->slow_intr_mask);
2701                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2702         }
2703         spin_unlock(&adapter->work_lock);
2704 }
2705
2706 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2707 {
2708         struct net_device *netdev = adapter->port[port_id];
2709         struct port_info *pi = netdev_priv(netdev);
2710
2711         spin_lock(&adapter->work_lock);
2712         pi->link_fault = 1;
2713         spin_unlock(&adapter->work_lock);
2714 }
2715
2716 static int t3_adapter_error(struct adapter *adapter, int reset)
2717 {
2718         int i, ret = 0;
2719
2720         if (is_offload(adapter) &&
2721             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2722                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2723                 offload_close(&adapter->tdev);
2724         }
2725
2726         /* Stop all ports */
2727         for_each_port(adapter, i) {
2728                 struct net_device *netdev = adapter->port[i];
2729
2730                 if (netif_running(netdev))
2731                         cxgb_close(netdev);
2732         }
2733
2734         /* Stop SGE timers */
2735         t3_stop_sge_timers(adapter);
2736
2737         adapter->flags &= ~FULL_INIT_DONE;
2738
2739         if (reset)
2740                 ret = t3_reset_adapter(adapter);
2741
2742         pci_disable_device(adapter->pdev);
2743
2744         return ret;
2745 }
2746
2747 static int t3_reenable_adapter(struct adapter *adapter)
2748 {
2749         if (pci_enable_device(adapter->pdev)) {
2750                 dev_err(&adapter->pdev->dev,
2751                         "Cannot re-enable PCI device after reset.\n");
2752                 goto err;
2753         }
2754         pci_set_master(adapter->pdev);
2755         pci_restore_state(adapter->pdev);
2756
2757         /* Free sge resources */
2758         t3_free_sge_resources(adapter);
2759
2760         if (t3_replay_prep_adapter(adapter))
2761                 goto err;
2762
2763         return 0;
2764 err:
2765         return -1;
2766 }
2767
2768 static void t3_resume_ports(struct adapter *adapter)
2769 {
2770         int i;
2771
2772         /* Restart the ports */
2773         for_each_port(adapter, i) {
2774                 struct net_device *netdev = adapter->port[i];
2775
2776                 if (netif_running(netdev)) {
2777                         if (cxgb_open(netdev)) {
2778                                 dev_err(&adapter->pdev->dev,
2779                                         "can't bring device back up"
2780                                         " after reset\n");
2781                                 continue;
2782                         }
2783                 }
2784         }
2785
2786         if (is_offload(adapter) && !ofld_disable)
2787                 cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2788 }
2789
2790 /*
2791  * Process a fatal error:
2792  * bring the ports down, reset the chip, bring the ports back up.
2793  */
2794 static void fatal_error_task(struct work_struct *work)
2795 {
2796         struct adapter *adapter = container_of(work, struct adapter,
2797                                                fatal_error_handler_task);
2798         int err = 0;
2799
2800         rtnl_lock();
2801         err = t3_adapter_error(adapter, 1);
2802         if (!err)
2803                 err = t3_reenable_adapter(adapter);
2804         if (!err)
2805                 t3_resume_ports(adapter);
2806
2807         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2808         rtnl_unlock();
2809 }
2810
2811 void t3_fatal_err(struct adapter *adapter)
2812 {
2813         unsigned int fw_status[4];
2814
2815         if (adapter->flags & FULL_INIT_DONE) {
2816                 t3_sge_stop(adapter);
2817                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2818                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2819                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2820                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2821
2822                 spin_lock(&adapter->work_lock);
2823                 t3_intr_disable(adapter);
2824                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2825                 spin_unlock(&adapter->work_lock);
2826         }
2827         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2828         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2829                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2830                          fw_status[0], fw_status[1],
2831                          fw_status[2], fw_status[3]);
2832 }
2833
2834 /**
2835  * t3_io_error_detected - called when PCI error is detected
2836  * @pdev: Pointer to PCI device
2837  * @state: The current pci connection state
2838  *
2839  * This function is called after a PCI bus error affecting
2840  * this device has been detected.
2841  */
2842 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2843                                              pci_channel_state_t state)
2844 {
2845         struct adapter *adapter = pci_get_drvdata(pdev);
2846         int ret;
2847
2848         if (state == pci_channel_io_perm_failure)
2849                 return PCI_ERS_RESULT_DISCONNECT;
2850
2851         ret = t3_adapter_error(adapter, 0);
2852
2853         /* Request a slot reset. */
2854         return PCI_ERS_RESULT_NEED_RESET;
2855 }
2856
2857 /**
2858  * t3_io_slot_reset - called after the PCI bus has been reset.
2859  * @pdev: Pointer to PCI device
2860  *
2861  * Restart the card from scratch, as if from a cold-boot.
2862  */
2863 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2864 {
2865         struct adapter *adapter = pci_get_drvdata(pdev);
2866
2867         if (!t3_reenable_adapter(adapter))
2868                 return PCI_ERS_RESULT_RECOVERED;
2869
2870         return PCI_ERS_RESULT_DISCONNECT;
2871 }
2872
2873 /**
2874  * t3_io_resume - called when traffic can start flowing again.
2875  * @pdev: Pointer to PCI device
2876  *
2877  * This callback is called when the error recovery driver tells us that
2878  * it's OK to resume normal operation.
2879  */
2880 static void t3_io_resume(struct pci_dev *pdev)
2881 {
2882         struct adapter *adapter = pci_get_drvdata(pdev);
2883
2884         CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2885                  t3_read_reg(adapter, A_PCIE_PEX_ERR));
2886
2887         t3_resume_ports(adapter);
2888 }
2889
2890 static struct pci_error_handlers t3_err_handler = {
2891         .error_detected = t3_io_error_detected,
2892         .slot_reset = t3_io_slot_reset,
2893         .resume = t3_io_resume,
2894 };
2895
2896 /*
2897  * Set the number of qsets based on the number of CPUs and the number of ports,
2898  * not to exceed the number of available qsets, assuming there are enough qsets
2899  * per port in HW.
2900  */
2901 static void set_nqsets(struct adapter *adap)
2902 {
2903         int i, j = 0;
2904         int num_cpus = num_online_cpus();
2905         int hwports = adap->params.nports;
2906         int nqsets = adap->msix_nvectors - 1;
2907
2908         if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2909                 if (hwports == 2 &&
2910                     (hwports * nqsets > SGE_QSETS ||
2911                      num_cpus >= nqsets / hwports))
2912                         nqsets /= hwports;
2913                 if (nqsets > num_cpus)
2914                         nqsets = num_cpus;
2915                 if (nqsets < 1 || hwports == 4)
2916                         nqsets = 1;
2917         } else
2918                 nqsets = 1;
2919
2920         for_each_port(adap, i) {
2921                 struct port_info *pi = adap2pinfo(adap, i);
2922
2923                 pi->first_qset = j;
2924                 pi->nqsets = nqsets;
2925                 j = pi->first_qset + nqsets;
2926
2927                 dev_info(&adap->pdev->dev,
2928                          "Port %d using %d queue sets.\n", i, nqsets);
2929         }
2930 }
2931
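/*
 * Worked example for set_nqsets() above, assuming SGE_QSETS is 8: a
 * rev > 0 two-port adapter that obtained 9 MSI-X vectors starts with
 * nqsets = 8; 2 * 8 > SGE_QSETS, so nqsets is halved to 4 and then capped
 * at the number of online CPUs.  With four or more CPUs, port 0 gets
 * qsets [0, 4) and port 1 gets [4, 8), which is what the first_qset
 * bookkeeping above computes.
 */
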
2932 static int __devinit cxgb_enable_msix(struct adapter *adap)
2933 {
2934         struct msix_entry entries[SGE_QSETS + 1];
2935         int vectors;
2936         int i, err;
2937
2938         vectors = ARRAY_SIZE(entries);
2939         for (i = 0; i < vectors; ++i)
2940                 entries[i].entry = i;
2941
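        /*
         * A positive return value from pci_enable_msix() is the number of
         * vectors that could have been allocated; retry with that smaller
         * count until the call succeeds (0) or fails outright (< 0).
         */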
2942         while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2943                 vectors = err;
2944
2945         if (err < 0)
2946                 pci_disable_msix(adap->pdev);
2947
2948         if (!err && vectors < (adap->params.nports + 1)) {
2949                 pci_disable_msix(adap->pdev);
2950                 err = -1;
2951         }
2952
2953         if (!err) {
2954                 for (i = 0; i < vectors; ++i)
2955                         adap->msix_info[i].vec = entries[i].vector;
2956                 adap->msix_nvectors = vectors;
2957         }
2958
2959         return err;
2960 }
2961
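/*
 * Log a one-line summary for each registered port; the adapter-wide
 * memory configuration is logged once, against the adapter's name.
 */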
2962 static void __devinit print_port_info(struct adapter *adap,
2963                                       const struct adapter_info *ai)
2964 {
2965         static const char *pci_variant[] = {
2966                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2967         };
2968
2969         int i;
2970         char buf[80];
2971
2972         if (is_pcie(adap))
2973                 snprintf(buf, sizeof(buf), "%s x%d",
2974                          pci_variant[adap->params.pci.variant],
2975                          adap->params.pci.width);
2976         else
2977                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2978                          pci_variant[adap->params.pci.variant],
2979                          adap->params.pci.speed, adap->params.pci.width);
2980
2981         for_each_port(adap, i) {
2982                 struct net_device *dev = adap->port[i];
2983                 const struct port_info *pi = netdev_priv(dev);
2984
2985                 if (!test_bit(i, &adap->registered_device_map))
2986                         continue;
2987                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2988                        dev->name, ai->desc, pi->phy.desc,
2989                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
2990                        (adap->flags & USING_MSIX) ? " MSI-X" :
2991                        (adap->flags & USING_MSI) ? " MSI" : "");
2992                 if (adap->name == dev->name && adap->params.vpd.mclk)
2993                         printk(KERN_INFO
2994                                "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2995                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2996                                t3_mc7_size(&adap->pmtx) >> 20,
2997                                t3_mc7_size(&adap->pmrx) >> 20,
2998                                adap->params.vpd.sn);
2999         }
3000 }
3001
3002 static const struct net_device_ops cxgb_netdev_ops = {
3003         .ndo_open               = cxgb_open,
3004         .ndo_stop               = cxgb_close,
3005         .ndo_start_xmit         = t3_eth_xmit,
3006         .ndo_get_stats          = cxgb_get_stats,
3007         .ndo_validate_addr      = eth_validate_addr,
3008         .ndo_set_multicast_list = cxgb_set_rxmode,
3009         .ndo_do_ioctl           = cxgb_ioctl,
3010         .ndo_change_mtu         = cxgb_change_mtu,
3011         .ndo_set_mac_address    = cxgb_set_mac_addr,
3012         .ndo_vlan_rx_register   = vlan_rx_register,
3013 #ifdef CONFIG_NET_POLL_CONTROLLER
3014         .ndo_poll_controller    = cxgb_netpoll,
3015 #endif
3016 };
3017
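/*
 * init_one - PCI probe routine: map the registers, allocate the adapter
 * and one net_device per port, prepare the hardware, and register as
 * many ports as possible.
 */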
3018 static int __devinit init_one(struct pci_dev *pdev,
3019                               const struct pci_device_id *ent)
3020 {
3021         static int version_printed;
3022
3023         int i, err, pci_using_dac = 0;
3024         resource_size_t mmio_start, mmio_len;
3025         const struct adapter_info *ai;
3026         struct adapter *adapter = NULL;
3027         struct port_info *pi;
3028
3029         if (!version_printed) {
3030                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3031                 ++version_printed;
3032         }
3033
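	/* Created on first probe and shared by all cxgb3 adapters. */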
3034         if (!cxgb3_wq) {
3035                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3036                 if (!cxgb3_wq) {
3037                         printk(KERN_ERR DRV_NAME
3038                                ": cannot initialize work queue\n");
3039                         return -ENOMEM;
3040                 }
3041         }
3042
3043         err = pci_request_regions(pdev, DRV_NAME);
3044         if (err) {
3045                 /* Just info, some other driver may have claimed the device. */
3046                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3047                 return err;
3048         }
3049
3050         err = pci_enable_device(pdev);
3051         if (err) {
3052                 dev_err(&pdev->dev, "cannot enable PCI device\n");
3053                 goto out_release_regions;
3054         }
3055
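	/* Prefer 64-bit DMA and fall back to a 32-bit mask if unavailable. */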
3056         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3057                 pci_using_dac = 1;
3058                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3059                 if (err) {
3060                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3061                                "coherent allocations\n");
3062                         goto out_disable_device;
3063                 }
3064         } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3065                 dev_err(&pdev->dev, "no usable DMA configuration\n");
3066                 goto out_disable_device;
3067         }
3068
3069         pci_set_master(pdev);
3070         pci_save_state(pdev);
3071
3072         mmio_start = pci_resource_start(pdev, 0);
3073         mmio_len = pci_resource_len(pdev, 0);
3074         ai = t3_get_adapter_info(ent->driver_data);
3075
3076         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3077         if (!adapter) {
3078                 err = -ENOMEM;
3079                 goto out_disable_device;
3080         }
3081
3082         adapter->nofail_skb =
3083                 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3084         if (!adapter->nofail_skb) {
3085                 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3086                 err = -ENOMEM;
3087                 goto out_free_adapter;
3088         }
3089
3090         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3091         if (!adapter->regs) {
3092                 dev_err(&pdev->dev, "cannot map device registers\n");
3093                 err = -ENOMEM;
3094                 goto out_free_adapter;
3095         }
3096
3097         adapter->pdev = pdev;
3098         adapter->name = pci_name(pdev);
3099         adapter->msg_enable = dflt_msg_enable;
3100         adapter->mmio_len = mmio_len;
3101
3102         mutex_init(&adapter->mdio_lock);
3103         spin_lock_init(&adapter->work_lock);
3104         spin_lock_init(&adapter->stats_lock);
3105
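	/*
	 * Deferred work: external interrupt handling, fatal error recovery
	 * and the periodic adapter check task.
	 */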
3106         INIT_LIST_HEAD(&adapter->adapter_list);
3107         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3108         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3109         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3110
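	/* Allocate and initialize a net_device for every port on the card. */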
3111         for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3112                 struct net_device *netdev;
3113
3114                 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3115                 if (!netdev) {
3116                         err = -ENOMEM;
3117                         goto out_free_dev;
3118                 }
3119
3120                 SET_NETDEV_DEV(netdev, &pdev->dev);
3121
3122                 adapter->port[i] = netdev;
3123                 pi = netdev_priv(netdev);
3124                 pi->adapter = adapter;
3125                 pi->rx_offload = T3_RX_CSUM | T3_LRO;
3126                 pi->port_id = i;
3127                 netif_carrier_off(netdev);
3128                 netif_tx_stop_all_queues(netdev);
3129                 netdev->irq = pdev->irq;
3130                 netdev->mem_start = mmio_start;
3131                 netdev->mem_end = mmio_start + mmio_len - 1;
3132                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3133                 netdev->features |= NETIF_F_GRO;
3134                 if (pci_using_dac)
3135                         netdev->features |= NETIF_F_HIGHDMA;
3136
3137                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3138                 netdev->netdev_ops = &cxgb_netdev_ops;
3139                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3140         }
3141
3142         pci_set_drvdata(pdev, adapter);
3143         if (t3_prep_adapter(adapter, ai, 1) < 0) {
3144                 err = -ENODEV;
3145                 goto out_free_dev;
3146         }
3147
	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration, we do not fail the whole card but rather proceed
	 * only with the ports we manage to register successfully.  However,
	 * we must register at least one net device.
	 */
3154         for_each_port(adapter, i) {
3155                 err = register_netdev(adapter->port[i]);
3156                 if (err)
3157                         dev_warn(&pdev->dev,
3158                                  "cannot register net device %s, skipping\n",
3159                                  adapter->port[i]->name);
3160                 else {
3161                         /*
3162                          * Change the name we use for messages to the name of
3163                          * the first successfully registered interface.
3164                          */
3165                         if (!adapter->registered_device_map)
3166                                 adapter->name = adapter->port[i]->name;
3167
3168                         __set_bit(i, &adapter->registered_device_map);
3169                 }
3170         }
3171         if (!adapter->registered_device_map) {
3172                 dev_err(&pdev->dev, "could not register any net devices\n");
3173                 goto out_free_dev;
3174         }
3175
3176         /* Driver's ready. Reflect it on LEDs */
3177         t3_led_ready(adapter);
3178
3179         if (is_offload(adapter)) {
3180                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3181                 cxgb3_adapter_ofld(adapter);
3182         }
3183
3184         /* See what interrupts we'll be using */
3185         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3186                 adapter->flags |= USING_MSIX;
3187         else if (msi > 0 && pci_enable_msi(pdev) == 0)
3188                 adapter->flags |= USING_MSI;
3189
3190         set_nqsets(adapter);
3191
	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);
	/* The sysfs attributes are best-effort; warn but do not fail. */
	if (err)
		dev_warn(&pdev->dev, "cannot create sysfs cxgb3 attributes\n");

3195         print_port_info(adapter, ai);
3196         return 0;
3197
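/*
 * Error unwind: no net device has been registered when we get here, so
 * the allocated netdevs can simply be freed.
 */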
3198 out_free_dev:
3199         iounmap(adapter->regs);
3200         for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3201                 if (adapter->port[i])
3202                         free_netdev(adapter->port[i]);
3203
3204 out_free_adapter:
3205         kfree(adapter);
3206
3207 out_disable_device:
3208         pci_disable_device(pdev);
3209 out_release_regions:
3210         pci_release_regions(pdev);
3211         pci_set_drvdata(pdev, NULL);
3212         return err;
3213 }
3214
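/*
 * remove_one - PCI remove routine: tear down in roughly the reverse order
 * of init_one, stopping the SGE and unregistering the ports before
 * freeing queue resources, interrupts and PCI resources.
 */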
3215 static void __devexit remove_one(struct pci_dev *pdev)
3216 {
3217         struct adapter *adapter = pci_get_drvdata(pdev);
3218
3219         if (adapter) {
3220                 int i;
3221
3222                 t3_sge_stop(adapter);
3223                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3224                                    &cxgb3_attr_group);
3225
3226                 if (is_offload(adapter)) {
3227                         cxgb3_adapter_unofld(adapter);
3228                         if (test_bit(OFFLOAD_DEVMAP_BIT,
3229                                      &adapter->open_device_map))
3230                                 offload_close(&adapter->tdev);
3231                 }
3232
		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);
3236
3237                 t3_stop_sge_timers(adapter);
3238                 t3_free_sge_resources(adapter);
3239                 cxgb_disable_msi(adapter);
3240
3241                 for_each_port(adapter, i)
3242                         if (adapter->port[i])
3243                                 free_netdev(adapter->port[i]);
3244
3245                 iounmap(adapter->regs);
		kfree_skb(adapter->nofail_skb);	/* kfree_skb() accepts NULL */
3248                 kfree(adapter);
3249                 pci_release_regions(pdev);
3250                 pci_disable_device(pdev);
3251                 pci_set_drvdata(pdev, NULL);
3252         }
3253 }
3254
3255 static struct pci_driver driver = {
3256         .name = DRV_NAME,
3257         .id_table = cxgb3_pci_tbl,
3258         .probe = init_one,
3259         .remove = __devexit_p(remove_one),
3260         .err_handler = &t3_err_handler,
3261 };
3262
static int __init cxgb3_init_module(void)
{
	cxgb3_offload_init();

	return pci_register_driver(&driver);
}
3272
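/*
 * Unregistering the driver runs remove_one for every adapter first, so
 * nothing can queue new work by the time the workqueue is destroyed.
 */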
3273 static void __exit cxgb3_cleanup_module(void)
3274 {
3275         pci_unregister_driver(&driver);
3276         if (cxgb3_wq)
3277                 destroy_workqueue(cxgb3_wq);
3278 }
3279
3280 module_init(cxgb3_init_module);
3281 module_exit(cxgb3_cleanup_module);