/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        CH_DEVICE(0x35, 6),     /* T3C20-derived T3C10 */
        CH_DEVICE(0x36, 3),     /* S320E-CR */
        CH_DEVICE(0x37, 7),     /* N320E-G2 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "interrupt scheme: 0 pin only, 1 allow MSI, 2 allow MSI-X");
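/*
 * Example (hypothetical usage): restrict the driver to MSI or pin
 * interrupts at module load time:
 *
 *      modprobe cxgb3 msi=1
 */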

/*
 * The driver enables offload by default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
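/*
 * Example (hypothetical usage): turn protocol offload off at load time:
 *
 *      modprobe cxgb3 ofld_disable=1
 */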

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
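/*
 * A minimal sketch of how such a private queue is typically created at
 * module init (the actual call lives outside this excerpt; shown here only
 * as an assumption for illustration):
 *
 *      cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 *      if (!cxgb3_wq)
 *              return -ENOMEM;
 */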

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
        struct net_device *dev = adap->port[port_id];
        struct port_info *pi = netdev_priv(dev);

        if (state == netif_carrier_ok(dev))
                return;

        if (state) {
                struct cmac *mac = &pi->mac;

                netif_carrier_on(dev);

                /* Clear local faults */
                t3_xgm_intr_disable(adap, pi->port_id);
                t3_read_reg(adap, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                t3_write_reg(adap,
                             A_XGM_INT_CAUSE + pi->mac.offset,
                             F_XGM_INT);

                t3_set_reg_field(adap,
                                 A_XGM_INT_ENABLE +
                                 pi->mac.offset,
                                 F_XGM_INT, F_XGM_INT);
                t3_xgm_intr_enable(adap, pi->port_id);

                t3_mac_enable(mac, MAC_DIRECTION_TX);
        } else
                netif_carrier_off(dev);

        link_report(dev);
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);

                        /* Clear local faults */
                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS +
                                    pi->mac.offset);
                        t3_write_reg(adapter,
                                     A_XGM_INT_CAUSE + pi->mac.offset,
                                     F_XGM_INT);

                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, F_XGM_INT);
                        t3_xgm_intr_enable(adapter, pi->port_id);

                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);

                        t3_xgm_intr_disable(adapter, pi->port_id);
                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_set_reg_field(adapter,
                                         A_XGM_INT_ENABLE + pi->mac.offset,
                                         F_XGM_INT, 0);

                        if (is_10G(adapter))
                                pi->phy.ops->power_down(&pi->phy, 1);

                        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter associated with the module change
 *      @port_id: the port index whose PHY module changed
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
        else
                printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
                       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                        n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 5;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}

static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->iff = i;
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL);
                if (!skb)
                        skb = adap->nofail_skb;
                if (!skb)
                        goto alloc_skb_fail;

                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
                if (skb == adap->nofail_skb) {
                        await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
                        adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
                        if (!adap->nofail_skb)
                                goto alloc_skb_fail;
                }
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                goto alloc_skb_fail;

        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        if (skb == adap->nofail_skb) {
                i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
                adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
        }

        t3_tp_set_offload_mode(adap, 0);
        return i;

alloc_skb_fail:
        t3_tp_set_offload_mode(adap, 0);
        return -ENOMEM;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
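/*
 * Worked example (hypothetical queue counts): with nq0 = 2 and nq1 = 2 the
 * first half of rspq_map reads 0,1,0,1,... and the second half 2,3,2,3,...,
 * so hashed traffic for each port round-robins over that port's own pair of
 * response queues.
 */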

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      set_qset_lro - Turn a queue set's LRO capability on and off
 *      @dev: the device the qset is attached to
 *      @qset_idx: the queue set index
 *      @val: the LRO switch
 *
 *      Sets LRO on or off for a particular queue set.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->params.sge.qset[qset_idx].lro = !!val;
        adapter->sge.qs[qset_idx].lro_enabled = !!val;
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
                     ++j, ++qset_idx) {
                        set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev,
                                netdev_get_tx_queue(dev, j));
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t (*format)(struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format)(to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t (*set)(struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set)(to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = { .attrs = cxgb3_attrs };
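/*
 * These attributes are registered on a port's net device, so they show up
 * under its sysfs directory.  Example (hypothetical interface name and
 * value): reserve 8192 MC5 entries for filters before bringing the adapter
 * up:
 *
 *      echo 8192 > /sys/class/net/eth0/nfilters
 *
 * Writes fail with -EBUSY once full initialization is done.
 */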

static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}
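/*
 * Worked example (hypothetical register values, assuming vpd.cclk is in
 * kHz): with a 200 MHz core clock (cclk = 200000), bpt = 10 bytes and
 * cpt = 2 ticks, the scheduler grants 10 bytes every 2 core ticks:
 *
 *      v    = (200000 * 1000) / 2    = 100000000 grants/s
 *      rate = (100000000 * 10) / 125 = 8000000 Kbps, i.e. 8 Gbps
 */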

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = { .attrs = offload_attrs };
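/*
 * Example (hypothetical interface name): cap Tx scheduler 0 at 100 Mbps,
 * then read the setting back:
 *
 *      echo 100000 > /sys/class/net/eth0/sched0
 *      cat /sys/class/net/eth0/sched0
 *
 * Values are in Kbps; a read reports "disabled" when no rate is set.
 */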

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
                write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                             int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL);
        if (!skb)
                skb = adap->nofail_skb;
        if (!skb)
                return -ENOMEM;

        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);
        if (skb == adap->nofail_skb) {
                adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                             GFP_KERNEL);
                if (!adap->nofail_skb)
                        ret = -ENOMEM;
        }

        return ret;
}

static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin"
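/*
 * With hypothetical version numbers 7.4.0, FW_FNAME expands to
 * "cxgb3/t3fw-7.4.0.bin" and, for a rev-B part, TPSRAM_NAME expands to
 * "cxgb3/t3b_psram-7.4.0.bin"; both images are fetched with
 * request_firmware() below.
 */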

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch (adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
                 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
                                FW_VERSION_MAJOR, FW_VERSION_MINOR,
                                FW_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                err = t3_check_tpsram_version(adap);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
                                TP_VERSION_MAJOR, TP_VERSION_MINOR,
                                TP_VERSION_MICRO, err ? "failed" : "succeeded");
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);

                t3_start_sge_timers(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                err = bind_qsets(adap);
                if (err) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", err);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        quiesce_rx(adapter);
        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
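/*
 * Worked example (hypothetical settings): linkpoll_period is in tenths of a
 * second, so linkpoll_period = 5 gives timeo = HZ / 2 and the check task
 * re-arms every half second; with linkpoll_period = 0 the interval falls
 * back to stats_update_period, expressed in whole seconds.
 */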

static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        /* Flush work scheduled while releasing TIDs */
        flush_scheduled_work();

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        dev->real_num_tx_queues = pi->nqsets;
        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_tx_start_all_queues(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (!adapter->open_device_map)
                return 0;

        /* Stop link fault interrupts */
        t3_xgm_intr_disable(adapter, pi->port_id);
        t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

        t3_port_intr_disable(adapter, pi->port_id);
        netif_tx_stop_all_queues(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_delayed_work_sync(&adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

        "LinkFaults         ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}
1443
1444 #define T3_REGMAP_SIZE (3 * 1024)
1445
1446 static int get_regs_len(struct net_device *dev)
1447 {
1448         return T3_REGMAP_SIZE;
1449 }
1450
1451 static int get_eeprom_len(struct net_device *dev)
1452 {
1453         return EEPROMSIZE;
1454 }
1455
1456 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1457 {
1458         struct port_info *pi = netdev_priv(dev);
1459         struct adapter *adapter = pi->adapter;
1460         u32 fw_vers = 0;
1461         u32 tp_vers = 0;
1462
1463         spin_lock(&adapter->stats_lock);
1464         t3_get_fw_version(adapter, &fw_vers);
1465         t3_get_tp_version(adapter, &tp_vers);
1466         spin_unlock(&adapter->stats_lock);
1467
1468         strcpy(info->driver, DRV_NAME);
1469         strcpy(info->version, DRV_VERSION);
1470         strcpy(info->bus_info, pci_name(adapter->pdev));
1471         if (!fw_vers)
1472                 strcpy(info->fw_version, "N/A");
1473         else {
1474                 snprintf(info->fw_version, sizeof(info->fw_version),
1475                          "%s %u.%u.%u TP %u.%u.%u",
1476                          G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1477                          G_FW_VERSION_MAJOR(fw_vers),
1478                          G_FW_VERSION_MINOR(fw_vers),
1479                          G_FW_VERSION_MICRO(fw_vers),
1480                          G_TP_VERSION_MAJOR(tp_vers),
1481                          G_TP_VERSION_MINOR(tp_vers),
1482                          G_TP_VERSION_MICRO(tp_vers));
1483         }
1484 }
1485
1486 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1487 {
1488         if (stringset == ETH_SS_STATS)
1489                 memcpy(data, stats_strings, sizeof(stats_strings));
1490 }
1491
1492 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1493                                             struct port_info *p, int idx)
1494 {
1495         int i;
1496         unsigned long tot = 0;
1497
1498         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1499                 tot += adapter->sge.qs[i].port_stats[idx];
1500         return tot;
1501 }
1502
1503 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1504                       u64 *data)
1505 {
1506         struct port_info *pi = netdev_priv(dev);
1507         struct adapter *adapter = pi->adapter;
1508         const struct mac_stats *s;
1509
1510         spin_lock(&adapter->stats_lock);
1511         s = t3_mac_update_stats(&pi->mac);
1512         spin_unlock(&adapter->stats_lock);
1513
1514         *data++ = s->tx_octets;
1515         *data++ = s->tx_frames;
1516         *data++ = s->tx_mcast_frames;
1517         *data++ = s->tx_bcast_frames;
1518         *data++ = s->tx_pause;
1519         *data++ = s->tx_underrun;
1520         *data++ = s->tx_fifo_urun;
1521
1522         *data++ = s->tx_frames_64;
1523         *data++ = s->tx_frames_65_127;
1524         *data++ = s->tx_frames_128_255;
1525         *data++ = s->tx_frames_256_511;
1526         *data++ = s->tx_frames_512_1023;
1527         *data++ = s->tx_frames_1024_1518;
1528         *data++ = s->tx_frames_1519_max;
1529
1530         *data++ = s->rx_octets;
1531         *data++ = s->rx_frames;
1532         *data++ = s->rx_mcast_frames;
1533         *data++ = s->rx_bcast_frames;
1534         *data++ = s->rx_pause;
1535         *data++ = s->rx_fcs_errs;
1536         *data++ = s->rx_symbol_errs;
1537         *data++ = s->rx_short;
1538         *data++ = s->rx_jabber;
1539         *data++ = s->rx_too_long;
1540         *data++ = s->rx_fifo_ovfl;
1541
1542         *data++ = s->rx_frames_64;
1543         *data++ = s->rx_frames_65_127;
1544         *data++ = s->rx_frames_128_255;
1545         *data++ = s->rx_frames_256_511;
1546         *data++ = s->rx_frames_512_1023;
1547         *data++ = s->rx_frames_1024_1518;
1548         *data++ = s->rx_frames_1519_max;
1549
1550         *data++ = pi->phy.fifo_errors;
1551
1552         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1553         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1554         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1555         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1556         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
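             /* three placeholder slots, apparently left over from the
              * retired LRO counters, kept zero so the values still line
              * up with stats_strings[]
              */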
1557         *data++ = 0;
1558         *data++ = 0;
1559         *data++ = 0;
1560         *data++ = s->rx_cong_drops;
1561
1562         *data++ = s->num_toggled;
1563         *data++ = s->num_resets;
1564
1565         *data++ = s->link_faults;
1566 }
1567
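     /*
      * Copy the register range [start, end] into the dump buffer at the
      * offsets matching the register addresses.
      */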
1568 static inline void reg_block_dump(struct adapter *ap, void *buf,
1569                                   unsigned int start, unsigned int end)
1570 {
1571         u32 *p = buf + start;
1572
1573         for (; start <= end; start += sizeof(u32))
1574                 *p++ = t3_read_reg(ap, start);
1575 }
1576
1577 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1578                      void *buf)
1579 {
1580         struct port_info *pi = netdev_priv(dev);
1581         struct adapter *ap = pi->adapter;
1582
1583         /*
1584          * Version scheme:
1585          * bits 0..9: chip version
1586          * bits 10..15: chip revision
1587          * bit 31: set for PCIe cards
1588          */
1589         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1590
1591         /*
1592          * We skip the MAC statistics registers because they are clear-on-read.
1593          * Also reading multi-register stats would need to synchronize with the
1594          * periodic mac stats accumulation.  Hard to justify the complexity.
1595          */
1596         memset(buf, 0, T3_REGMAP_SIZE);
1597         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1598         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1599         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1600         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1601         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1602         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1603                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1604         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1605                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1606 }
1607
1608 static int restart_autoneg(struct net_device *dev)
1609 {
1610         struct port_info *p = netdev_priv(dev);
1611
1612         if (!netif_running(dev))
1613                 return -EAGAIN;
1614         if (p->link_config.autoneg != AUTONEG_ENABLE)
1615                 return -EINVAL;
1616         p->phy.ops->autoneg_restart(&p->phy);
1617         return 0;
1618 }
1619
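     /*
      * Identify the adapter by blinking the LED on GPIO0: toggle it every
      * 500 ms for the requested number of seconds (default 2), then leave
      * it driven high.
      */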
1620 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1621 {
1622         struct port_info *pi = netdev_priv(dev);
1623         struct adapter *adapter = pi->adapter;
1624         int i;
1625
1626         if (data == 0)
1627                 data = 2;
1628
1629         for (i = 0; i < data * 2; i++) {
1630                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1631                                  (i & 1) ? F_GPIO0_OUT_VAL : 0);
1632                 if (msleep_interruptible(500))
1633                         break;
1634         }
1635         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1636                          F_GPIO0_OUT_VAL);
1637         return 0;
1638 }
1639
1640 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1641 {
1642         struct port_info *p = netdev_priv(dev);
1643
1644         cmd->supported = p->link_config.supported;
1645         cmd->advertising = p->link_config.advertising;
1646
1647         if (netif_carrier_ok(dev)) {
1648                 cmd->speed = p->link_config.speed;
1649                 cmd->duplex = p->link_config.duplex;
1650         } else {
1651                 cmd->speed = -1;
1652                 cmd->duplex = -1;
1653         }
1654
1655         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1656         cmd->phy_address = p->phy.mdio.prtad;
1657         cmd->transceiver = XCVR_EXTERNAL;
1658         cmd->autoneg = p->link_config.autoneg;
1659         cmd->maxtxpkt = 0;
1660         cmd->maxrxpkt = 0;
1661         return 0;
1662 }
1663
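     /*
      * Map an ethtool speed/duplex pair to the matching SUPPORTED_*
      * capability bit, or 0 if the combination is unsupported.
      */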
1664 static int speed_duplex_to_caps(int speed, int duplex)
1665 {
1666         int cap = 0;
1667
1668         switch (speed) {
1669         case SPEED_10:
1670                 if (duplex == DUPLEX_FULL)
1671                         cap = SUPPORTED_10baseT_Full;
1672                 else
1673                         cap = SUPPORTED_10baseT_Half;
1674                 break;
1675         case SPEED_100:
1676                 if (duplex == DUPLEX_FULL)
1677                         cap = SUPPORTED_100baseT_Full;
1678                 else
1679                         cap = SUPPORTED_100baseT_Half;
1680                 break;
1681         case SPEED_1000:
1682                 if (duplex == DUPLEX_FULL)
1683                         cap = SUPPORTED_1000baseT_Full;
1684                 else
1685                         cap = SUPPORTED_1000baseT_Half;
1686                 break;
1687         case SPEED_10000:
1688                 if (duplex == DUPLEX_FULL)
1689                         cap = SUPPORTED_10000baseT_Full;
1690         }
1691         return cap;
1692 }
1693
1694 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1695                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1696                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1697                       ADVERTISED_10000baseT_Full)
1698
1699 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1700 {
1701         struct port_info *p = netdev_priv(dev);
1702         struct link_config *lc = &p->link_config;
1703
1704         if (!(lc->supported & SUPPORTED_Autoneg)) {
1705                 /*
1706                  * PHY offers a single speed/duplex.  See if that's what's
1707                  * being requested.
1708                  */
1709                 if (cmd->autoneg == AUTONEG_DISABLE) {
1710                         int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1711                         if (lc->supported & cap)
1712                                 return 0;
1713                 }
1714                 return -EINVAL;
1715         }
1716
1717         if (cmd->autoneg == AUTONEG_DISABLE) {
1718                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1719
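                     /* 1Gb/s must be auto-negotiated, it cannot be forced */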
1720                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1721                         return -EINVAL;
1722                 lc->requested_speed = cmd->speed;
1723                 lc->requested_duplex = cmd->duplex;
1724                 lc->advertising = 0;
1725         } else {
1726                 cmd->advertising &= ADVERTISED_MASK;
1727                 cmd->advertising &= lc->supported;
1728                 if (!cmd->advertising)
1729                         return -EINVAL;
1730                 lc->requested_speed = SPEED_INVALID;
1731                 lc->requested_duplex = DUPLEX_INVALID;
1732                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1733         }
1734         lc->autoneg = cmd->autoneg;
1735         if (netif_running(dev))
1736                 t3_link_start(&p->phy, &p->mac, lc);
1737         return 0;
1738 }
1739
1740 static void get_pauseparam(struct net_device *dev,
1741                            struct ethtool_pauseparam *epause)
1742 {
1743         struct port_info *p = netdev_priv(dev);
1744
1745         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1746         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1747         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1748 }
1749
1750 static int set_pauseparam(struct net_device *dev,
1751                           struct ethtool_pauseparam *epause)
1752 {
1753         struct port_info *p = netdev_priv(dev);
1754         struct link_config *lc = &p->link_config;
1755
1756         if (epause->autoneg == AUTONEG_DISABLE)
1757                 lc->requested_fc = 0;
1758         else if (lc->supported & SUPPORTED_Autoneg)
1759                 lc->requested_fc = PAUSE_AUTONEG;
1760         else
1761                 return -EINVAL;
1762
1763         if (epause->rx_pause)
1764                 lc->requested_fc |= PAUSE_RX;
1765         if (epause->tx_pause)
1766                 lc->requested_fc |= PAUSE_TX;
1767         if (lc->autoneg == AUTONEG_ENABLE) {
1768                 if (netif_running(dev))
1769                         t3_link_start(&p->phy, &p->mac, lc);
1770         } else {
1771                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1772                 if (netif_running(dev))
1773                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1774         }
1775         return 0;
1776 }
1777
1778 static u32 get_rx_csum(struct net_device *dev)
1779 {
1780         struct port_info *p = netdev_priv(dev);
1781
1782         return p->rx_offload & T3_RX_CSUM;
1783 }
1784
1785 static int set_rx_csum(struct net_device *dev, u32 data)
1786 {
1787         struct port_info *p = netdev_priv(dev);
1788
1789         if (data) {
1790                 p->rx_offload |= T3_RX_CSUM;
1791         } else {
1792                 int i;
1793
1794                 p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
1795                 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1796                         set_qset_lro(dev, i, 0);
1797         }
1798         return 0;
1799 }
1800
1801 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1802 {
1803         struct port_info *pi = netdev_priv(dev);
1804         struct adapter *adapter = pi->adapter;
1805         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1806
1807         e->rx_max_pending = MAX_RX_BUFFERS;
1808         e->rx_mini_max_pending = 0;
1809         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1810         e->tx_max_pending = MAX_TXQ_ENTRIES;
1811
1812         e->rx_pending = q->fl_size;
1813         e->rx_mini_pending = q->rspq_size;
1814         e->rx_jumbo_pending = q->jumbo_size;
1815         e->tx_pending = q->txq_size[0];
1816 }
1817
1818 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1819 {
1820         struct port_info *pi = netdev_priv(dev);
1821         struct adapter *adapter = pi->adapter;
1822         struct qset_params *q;
1823         int i;
1824
1825         if (e->rx_pending > MAX_RX_BUFFERS ||
1826             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1827             e->tx_pending > MAX_TXQ_ENTRIES ||
1828             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1829             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1830             e->rx_pending < MIN_FL_ENTRIES ||
1831             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1832             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1833                 return -EINVAL;
1834
1835         if (adapter->flags & FULL_INIT_DONE)
1836                 return -EBUSY;
1837
1838         q = &adapter->params.sge.qset[pi->first_qset];
1839         for (i = 0; i < pi->nqsets; ++i, ++q) {
1840                 q->rspq_size = e->rx_mini_pending;
1841                 q->fl_size = e->rx_pending;
1842                 q->jumbo_size = e->rx_jumbo_pending;
1843                 q->txq_size[0] = e->tx_pending;
1844                 q->txq_size[1] = e->tx_pending;
1845                 q->txq_size[2] = e->tx_pending;
1846         }
1847         return 0;
1848 }
1849
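     /*
      * Set the Rx interrupt coalescing time.  Note that this implementation
      * applies the new value through queue set 0 only.
      */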
1850 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1851 {
1852         struct port_info *pi = netdev_priv(dev);
1853         struct adapter *adapter = pi->adapter;
1854         struct qset_params *qsp = &adapter->params.sge.qset[0];
1855         struct sge_qset *qs = &adapter->sge.qs[0];
1856
1857         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1858                 return -EINVAL;
1859
1860         qsp->coalesce_usecs = c->rx_coalesce_usecs;
1861         t3_update_qset_coalesce(qs, qsp);
1862         return 0;
1863 }
1864
1865 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1866 {
1867         struct port_info *pi = netdev_priv(dev);
1868         struct adapter *adapter = pi->adapter;
1869         struct qset_params *q = adapter->params.sge.qset;
1870
1871         c->rx_coalesce_usecs = q->coalesce_usecs;
1872         return 0;
1873 }
1874
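     /*
      * Read the requested EEPROM range into a word-aligned bounce buffer,
      * 4 bytes at a time, then copy out just the bytes the caller asked for.
      */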
1875 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1876                       u8 *data)
1877 {
1878         struct port_info *pi = netdev_priv(dev);
1879         struct adapter *adapter = pi->adapter;
1880         int i, err = 0;
1881
1882         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1883         if (!buf)
1884                 return -ENOMEM;
1885
1886         e->magic = EEPROM_MAGIC;
1887         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1888                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
1889
1890         if (!err)
1891                 memcpy(data, buf + e->offset, e->len);
1892         kfree(buf);
1893         return err;
1894 }
1895
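     /*
      * Write a byte range to the EEPROM.  Accesses are in 4-byte words, so
      * partially covered words at either end are read back and merged first,
      * and write protection is dropped for the duration of the update.
      */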
1896 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1897                       u8 *data)
1898 {
1899         struct port_info *pi = netdev_priv(dev);
1900         struct adapter *adapter = pi->adapter;
1901         u32 aligned_offset, aligned_len;
1902         __le32 *p;
1903         u8 *buf;
1904         int err;
1905
1906         if (eeprom->magic != EEPROM_MAGIC)
1907                 return -EINVAL;
1908
1909         aligned_offset = eeprom->offset & ~3;
1910         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1911
1912         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1913                 buf = kmalloc(aligned_len, GFP_KERNEL);
1914                 if (!buf)
1915                         return -ENOMEM;
1916                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *)buf);
1917                 if (!err && aligned_len > 4)
1918                         err = t3_seeprom_read(adapter,
1919                                               aligned_offset + aligned_len - 4,
1920                                               (__le32 *)&buf[aligned_len - 4]);
1921                 if (err)
1922                         goto out;
1923                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1924         } else
1925                 buf = data;
1926
1927         err = t3_seeprom_wp(adapter, 0);
1928         if (err)
1929                 goto out;
1930
1931         for (p = (__le32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1932                 err = t3_seeprom_write(adapter, aligned_offset, *p);
1933                 aligned_offset += 4;
1934         }
1935
1936         if (!err)
1937                 err = t3_seeprom_wp(adapter, 1);
1938 out:
1939         if (buf != data)
1940                 kfree(buf);
1941         return err;
1942 }
1943
1944 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1945 {
1946         wol->supported = 0;
1947         wol->wolopts = 0;
1948         memset(&wol->sopass, 0, sizeof(wol->sopass));
1949 }
1950
1951 static const struct ethtool_ops cxgb_ethtool_ops = {
1952         .get_settings = get_settings,
1953         .set_settings = set_settings,
1954         .get_drvinfo = get_drvinfo,
1955         .get_msglevel = get_msglevel,
1956         .set_msglevel = set_msglevel,
1957         .get_ringparam = get_sge_param,
1958         .set_ringparam = set_sge_param,
1959         .get_coalesce = get_coalesce,
1960         .set_coalesce = set_coalesce,
1961         .get_eeprom_len = get_eeprom_len,
1962         .get_eeprom = get_eeprom,
1963         .set_eeprom = set_eeprom,
1964         .get_pauseparam = get_pauseparam,
1965         .set_pauseparam = set_pauseparam,
1966         .get_rx_csum = get_rx_csum,
1967         .set_rx_csum = set_rx_csum,
1968         .set_tx_csum = ethtool_op_set_tx_csum,
1969         .set_sg = ethtool_op_set_sg,
1970         .get_link = ethtool_op_get_link,
1971         .get_strings = get_strings,
1972         .phys_id = cxgb3_phys_id,
1973         .nway_reset = restart_autoneg,
1974         .get_sset_count = get_sset_count,
1975         .get_ethtool_stats = get_stats,
1976         .get_regs_len = get_regs_len,
1977         .get_regs = get_regs,
1978         .get_wol = get_wol,
1979         .set_tso = ethtool_op_set_tso,
1980 };
1981
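     /*
      * Range check used by the extension ioctl below: negative values mean
      * "leave this parameter unchanged" and therefore always pass.
      */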
1982 static int in_range(int val, int lo, int hi)
1983 {
1984         return val < 0 || (val <= hi && val >= lo);
1985 }
1986
1987 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1988 {
1989         struct port_info *pi = netdev_priv(dev);
1990         struct adapter *adapter = pi->adapter;
1991         u32 cmd;
1992         int ret;
1993
1994         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1995                 return -EFAULT;
1996
1997         switch (cmd) {
1998         case CHELSIO_SET_QSET_PARAMS:{
1999                 int i;
2000                 struct qset_params *q;
2001                 struct ch_qset_params t;
2002                 int q1 = pi->first_qset;
2003                 int nqsets = pi->nqsets;
2004
2005                 if (!capable(CAP_NET_ADMIN))
2006                         return -EPERM;
2007                 if (copy_from_user(&t, useraddr, sizeof(t)))
2008                         return -EFAULT;
2009                 if (t.qset_idx >= SGE_QSETS)
2010                         return -EINVAL;
2011                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
2012                     !in_range(t.cong_thres, 0, 255) ||
2013                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
2014                               MAX_TXQ_ENTRIES) ||
2015                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
2016                               MAX_TXQ_ENTRIES) ||
2017                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
2018                               MAX_CTRL_TXQ_ENTRIES) ||
2019                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
2020                               MAX_RX_BUFFERS) ||
2021                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
2022                               MAX_RX_JUMBO_BUFFERS) ||
2023                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
2024                               MAX_RSPQ_ENTRIES))
2025                         return -EINVAL;
2026
2027                 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
2028                         for_each_port(adapter, i) {
2029                                 pi = adap2pinfo(adapter, i);
2030                                 if (t.qset_idx >= pi->first_qset &&
2031                                     t.qset_idx < pi->first_qset + pi->nqsets &&
2032                                     !(pi->rx_offload & T3_RX_CSUM))
2033                                         return -EINVAL;
2034                         }
2035
2036                 if ((adapter->flags & FULL_INIT_DONE) &&
2037                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
2038                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
2039                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
2040                         t.polling >= 0 || t.cong_thres >= 0))
2041                         return -EBUSY;
2042
2043                 /* Allow setting of any available qset when offload enabled */
2044                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2045                         q1 = 0;
2046                         for_each_port(adapter, i) {
2047                                 pi = adap2pinfo(adapter, i);
2048                                 nqsets += pi->first_qset + pi->nqsets;
2049                         }
2050                 }
2051
2052                 if (t.qset_idx < q1)
2053                         return -EINVAL;
2054                 if (t.qset_idx > q1 + nqsets - 1)
2055                         return -EINVAL;
2056
2057                 q = &adapter->params.sge.qset[t.qset_idx];
2058
2059                 if (t.rspq_size >= 0)
2060                         q->rspq_size = t.rspq_size;
2061                 if (t.fl_size[0] >= 0)
2062                         q->fl_size = t.fl_size[0];
2063                 if (t.fl_size[1] >= 0)
2064                         q->jumbo_size = t.fl_size[1];
2065                 if (t.txq_size[0] >= 0)
2066                         q->txq_size[0] = t.txq_size[0];
2067                 if (t.txq_size[1] >= 0)
2068                         q->txq_size[1] = t.txq_size[1];
2069                 if (t.txq_size[2] >= 0)
2070                         q->txq_size[2] = t.txq_size[2];
2071                 if (t.cong_thres >= 0)
2072                         q->cong_thres = t.cong_thres;
2073                 if (t.intr_lat >= 0) {
2074                         struct sge_qset *qs =
2075                                 &adapter->sge.qs[t.qset_idx];
2076
2077                         q->coalesce_usecs = t.intr_lat;
2078                         t3_update_qset_coalesce(qs, q);
2079                 }
2080                 if (t.polling >= 0) {
2081                         if (adapter->flags & USING_MSIX)
2082                                 q->polling = t.polling;
2083                         else {
2084                                 /* No polling with INTx for T3A */
2085                                 if (adapter->params.rev == 0 &&
2086                                         !(adapter->flags & USING_MSI))
2087                                         t.polling = 0;
2088
2089                                 for (i = 0; i < SGE_QSETS; i++) {
2090                                         q = &adapter->params.sge.qset[i];
2092                                         q->polling = t.polling;
2093                                 }
2094                         }
2095                 }
2096                 if (t.lro >= 0)
2097                         set_qset_lro(dev, t.qset_idx, t.lro);
2098
2099                 break;
2100         }
2101         case CHELSIO_GET_QSET_PARAMS:{
2102                 struct qset_params *q;
2103                 struct ch_qset_params t;
2104                 int q1 = pi->first_qset;
2105                 int nqsets = pi->nqsets;
2106                 int i;
2107
2108                 if (copy_from_user(&t, useraddr, sizeof(t)))
2109                         return -EFAULT;
2110
2111                 /* Display qsets for all ports when offload enabled */
2112                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2113                         q1 = 0;
2114                         for_each_port(adapter, i) {
2115                                 pi = adap2pinfo(adapter, i);
2116                                 nqsets = pi->first_qset + pi->nqsets;
2117                         }
2118                 }
2119
2120                 if (t.qset_idx >= nqsets)
2121                         return -EINVAL;
2122
2123                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2124                 t.rspq_size = q->rspq_size;
2125                 t.txq_size[0] = q->txq_size[0];
2126                 t.txq_size[1] = q->txq_size[1];
2127                 t.txq_size[2] = q->txq_size[2];
2128                 t.fl_size[0] = q->fl_size;
2129                 t.fl_size[1] = q->jumbo_size;
2130                 t.polling = q->polling;
2131                 t.lro = q->lro;
2132                 t.intr_lat = q->coalesce_usecs;
2133                 t.cong_thres = q->cong_thres;
2134                 t.qnum = q1;
2135
2136                 if (adapter->flags & USING_MSIX)
2137                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2138                 else
2139                         t.vector = adapter->pdev->irq;
2140
2141                 if (copy_to_user(useraddr, &t, sizeof(t)))
2142                         return -EFAULT;
2143                 break;
2144         }
2145         case CHELSIO_SET_QSET_NUM:{
2146                 struct ch_reg edata;
2147                 unsigned int i, first_qset = 0, other_qsets = 0;
2148
2149                 if (!capable(CAP_NET_ADMIN))
2150                         return -EPERM;
2151                 if (adapter->flags & FULL_INIT_DONE)
2152                         return -EBUSY;
2153                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2154                         return -EFAULT;
2155                 if (edata.val < 1 ||
2156                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2157                         return -EINVAL;
2158
2159                 for_each_port(adapter, i)
2160                         if (adapter->port[i] && adapter->port[i] != dev)
2161                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2162
2163                 if (edata.val + other_qsets > SGE_QSETS)
2164                         return -EINVAL;
2165
2166                 pi->nqsets = edata.val;
2167
2168                 for_each_port(adapter, i)
2169                         if (adapter->port[i]) {
2170                                 pi = adap2pinfo(adapter, i);
2171                                 pi->first_qset = first_qset;
2172                                 first_qset += pi->nqsets;
2173                         }
2174                 break;
2175         }
2176         case CHELSIO_GET_QSET_NUM:{
2177                 struct ch_reg edata;
2178
2179                 edata.cmd = CHELSIO_GET_QSET_NUM;
2180                 edata.val = pi->nqsets;
2181                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2182                         return -EFAULT;
2183                 break;
2184         }
2185         case CHELSIO_LOAD_FW:{
2186                 u8 *fw_data;
2187                 struct ch_mem_range t;
2188
2189                 if (!capable(CAP_SYS_RAWIO))
2190                         return -EPERM;
2191                 if (copy_from_user(&t, useraddr, sizeof(t)))
2192                         return -EFAULT;
2193                 /* XXX: t.len comes from userspace and is used unchecked
2194                  * as the allocation size below */
2194                 fw_data = kmalloc(t.len, GFP_KERNEL);
2195                 if (!fw_data)
2196                         return -ENOMEM;
2197
2198                 if (copy_from_user(fw_data, useraddr + sizeof(t),
2199                                    t.len)) {
2200                         kfree(fw_data);
2201                         return -EFAULT;
2202                 }
2203
2204                 ret = t3_load_fw(adapter, fw_data, t.len);
2205                 kfree(fw_data);
2206                 if (ret)
2207                         return ret;
2208                 break;
2209         }
2210         case CHELSIO_SETMTUTAB:{
2211                 struct ch_mtus m;
2212                 int i;
2213
2214                 if (!is_offload(adapter))
2215                         return -EOPNOTSUPP;
2216                 if (!capable(CAP_NET_ADMIN))
2217                         return -EPERM;
2218                 if (offload_running(adapter))
2219                         return -EBUSY;
2220                 if (copy_from_user(&m, useraddr, sizeof(m)))
2221                         return -EFAULT;
2222                 if (m.nmtus != NMTUS)
2223                         return -EINVAL;
2224                 if (m.mtus[0] < 81)     /* accommodate SACK */
2225                         return -EINVAL;
2226
2227                 /* MTUs must be in ascending order */
2228                 for (i = 1; i < NMTUS; ++i)
2229                         if (m.mtus[i] < m.mtus[i - 1])
2230                                 return -EINVAL;
2231
2232                 memcpy(adapter->params.mtus, m.mtus,
2233                         sizeof(adapter->params.mtus));
2234                 break;
2235         }
2236         case CHELSIO_GET_PM:{
2237                 struct tp_params *p = &adapter->params.tp;
2238                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2239
2240                 if (!is_offload(adapter))
2241                         return -EOPNOTSUPP;
2242                 m.tx_pg_sz = p->tx_pg_size;
2243                 m.tx_num_pg = p->tx_num_pgs;
2244                 m.rx_pg_sz = p->rx_pg_size;
2245                 m.rx_num_pg = p->rx_num_pgs;
2246                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2247                 if (copy_to_user(useraddr, &m, sizeof(m)))
2248                         return -EFAULT;
2249                 break;
2250         }
2251         case CHELSIO_SET_PM:{
2252                 struct ch_pm m;
2253                 struct tp_params *p = &adapter->params.tp;
2254
2255                 if (!is_offload(adapter))
2256                         return -EOPNOTSUPP;
2257                 if (!capable(CAP_NET_ADMIN))
2258                         return -EPERM;
2259                 if (adapter->flags & FULL_INIT_DONE)
2260                         return -EBUSY;
2261                 if (copy_from_user(&m, useraddr, sizeof(m)))
2262                         return -EFAULT;
2263                 if (!is_power_of_2(m.rx_pg_sz) ||
2264                         !is_power_of_2(m.tx_pg_sz))
2265                         return -EINVAL; /* not power of 2 */
2266                 if (!(m.rx_pg_sz & 0x14000))
2267                         return -EINVAL; /* not 16KB or 64KB */
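                     /* tx page size must be 16KB to 16MB, a power of 4 */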
2268                 if (!(m.tx_pg_sz & 0x1554000))
2269                         return -EINVAL;
2270                 if (m.tx_num_pg == -1)
2271                         m.tx_num_pg = p->tx_num_pgs;
2272                 if (m.rx_num_pg == -1)
2273                         m.rx_num_pg = p->rx_num_pgs;
2274                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2275                         return -EINVAL;
2276                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2277                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2278                         return -EINVAL;
2279                 p->rx_pg_size = m.rx_pg_sz;
2280                 p->tx_pg_size = m.tx_pg_sz;
2281                 p->rx_num_pgs = m.rx_num_pg;
2282                 p->tx_num_pgs = m.tx_num_pg;
2283                 break;
2284         }
2285         case CHELSIO_GET_MEM:{
2286                 struct ch_mem_range t;
2287                 struct mc7 *mem;
2288                 u64 buf[32];
2289
2290                 if (!is_offload(adapter))
2291                         return -EOPNOTSUPP;
2292                 if (!(adapter->flags & FULL_INIT_DONE))
2293                         return -EIO;    /* need the memory controllers */
2294                 if (copy_from_user(&t, useraddr, sizeof(t)))
2295                         return -EFAULT;
2296                 if ((t.addr & 7) || (t.len & 7))
2297                         return -EINVAL;
2298                 if (t.mem_id == MEM_CM)
2299                         mem = &adapter->cm;
2300                 else if (t.mem_id == MEM_PMRX)
2301                         mem = &adapter->pmrx;
2302                 else if (t.mem_id == MEM_PMTX)
2303                         mem = &adapter->pmtx;
2304                 else
2305                         return -EINVAL;
2306
2307                 /*
2308                  * Version scheme:
2309                  * bits 0..9: chip version
2310                  * bits 10..15: chip revision
2311                  */
2312                 t.version = 3 | (adapter->params.rev << 10);
2313                 if (copy_to_user(useraddr, &t, sizeof(t)))
2314                         return -EFAULT;
2315
2316                 /*
2317                  * Read 256 bytes at a time as len can be large and we don't
2318                  * want to use huge intermediate buffers.
2319                  */
2320                 useraddr += sizeof(t);  /* advance to start of buffer */
2321                 while (t.len) {
2322                         unsigned int chunk =
2323                                 min_t(unsigned int, t.len, sizeof(buf));
2324
2325                         ret = t3_mc7_bd_read(mem, t.addr / 8,
2326                                              chunk / 8, buf);
2328                         if (ret)
2329                                 return ret;
2330                         if (copy_to_user(useraddr, buf, chunk))
2331                                 return -EFAULT;
2332                         useraddr += chunk;
2333                         t.addr += chunk;
2334                         t.len -= chunk;
2335                 }
2336                 break;
2337         }
2338         case CHELSIO_SET_TRACE_FILTER:{
2339                 struct ch_trace t;
2340                 const struct trace_params *tp;
2341
2342                 if (!capable(CAP_NET_ADMIN))
2343                         return -EPERM;
2344                 if (!offload_running(adapter))
2345                         return -EAGAIN;
2346                 if (copy_from_user(&t, useraddr, sizeof(t)))
2347                         return -EFAULT;
2348
2349                 tp = (const struct trace_params *)&t.sip;
2350                 if (t.config_tx)
2351                         t3_config_trace_filter(adapter, tp, 0,
2352                                                 t.invert_match,
2353                                                 t.trace_tx);
2354                 if (t.config_rx)
2355                         t3_config_trace_filter(adapter, tp, 1,
2356                                                 t.invert_match,
2357                                                 t.trace_rx);
2358                 break;
2359         }
2360         default:
2361                 return -EOPNOTSUPP;
2362         }
2363         return 0;
2364 }
2365
2366 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2367 {
2368         struct mii_ioctl_data *data = if_mii(req);
2369         struct port_info *pi = netdev_priv(dev);
2370         struct adapter *adapter = pi->adapter;
2371
2372         switch (cmd) {
2373         case SIOCGMIIREG:
2374         case SIOCSMIIREG:
2375                 /* Convert phy_id from older PRTAD/DEVAD format */
2376                 if (is_10G(adapter) &&
2377                     !mdio_phy_id_is_c45(data->phy_id) &&
2378                     (data->phy_id & 0x1f00) &&
2379                     !(data->phy_id & 0xe0e0))
2380                         data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
2381                                                        data->phy_id & 0x1f);
2382                 /* FALLTHRU */
2383         case SIOCGMIIPHY:
2384                 return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
2385         case SIOCCHIOCTL:
2386                 return cxgb_extension_ioctl(dev, req->ifr_data);
2387         default:
2388                 return -EOPNOTSUPP;
2389         }
2390 }
2391
2392 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2393 {
2394         struct port_info *pi = netdev_priv(dev);
2395         struct adapter *adapter = pi->adapter;
2396         int ret;
2397
2398         if (new_mtu < 81)       /* accommodate SACK */
2399                 return -EINVAL;
2400         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2401                 return ret;
2402         dev->mtu = new_mtu;
2403         init_port_mtus(adapter);
2404         if (adapter->params.rev == 0 && offload_running(adapter))
2405                 t3_load_mtus(adapter, adapter->params.mtus,
2406                              adapter->params.a_wnd, adapter->params.b_wnd,
2407                              adapter->port[0]->mtu);
2408         return 0;
2409 }
2410
2411 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2412 {
2413         struct port_info *pi = netdev_priv(dev);
2414         struct adapter *adapter = pi->adapter;
2415         struct sockaddr *addr = p;
2416
2417         if (!is_valid_ether_addr(addr->sa_data))
2418                 return -EINVAL;
2419
2420         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2421         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2422         if (offload_running(adapter))
2423                 write_smt_entry(adapter, pi->port_id);
2424         return 0;
2425 }
2426
2427 /**
2428  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2429  * @adap: the adapter
2430  * @p: the port
2431  *
2432  * Ensures that current Rx processing on any of the queues associated with
2433  * the given port completes before returning.  We do this by acquiring and
2434  * releasing the locks of the response queues associated with the port.
2435  */
2436 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2437 {
2438         int i;
2439
2440         for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2441                 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2442
2443                 spin_lock_irq(&q->lock);
2444                 spin_unlock_irq(&q->lock);
2445         }
2446 }
2447
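     /*
      * Update VLAN acceleration for a port.  T3 rev 0 has a single control
      * for all ports, so acceleration stays on while any port still has a
      * VLAN group registered.
      */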
2448 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2449 {
2450         struct port_info *pi = netdev_priv(dev);
2451         struct adapter *adapter = pi->adapter;
2452
2453         pi->vlan_grp = grp;
2454         if (adapter->params.rev > 0)
2455                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2456         else {
2457                 /* single control for all ports */
2458                 unsigned int i, have_vlans = 0;
2459                 for_each_port(adapter, i)
2460                     have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2461
2462                 t3_set_vlan_accel(adapter, 1, have_vlans);
2463         }
2464         t3_synchronize_rx(adapter, pi);
2465 }
2466
2467 #ifdef CONFIG_NET_POLL_CONTROLLER
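     /*
      * Polled 'interrupt' for netconsole and similar users: invoke the
      * interrupt handler for each of the port's queue sets directly.
      */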
2468 static void cxgb_netpoll(struct net_device *dev)
2469 {
2470         struct port_info *pi = netdev_priv(dev);
2471         struct adapter *adapter = pi->adapter;
2472         int qidx;
2473
2474         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2475                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2476                 void *source;
2477
2478                 if (adapter->flags & USING_MSIX)
2479                         source = qs;
2480                 else
2481                         source = adapter;
2482
2483                 t3_intr_handler(adapter, qs->rspq.polling)(0, source);
2484         }
2485 }
2486 #endif
2487
2488 /*
2489  * Periodic accumulation of MAC statistics.
2490  */
2491 static void mac_stats_update(struct adapter *adapter)
2492 {
2493         int i;
2494
2495         for_each_port(adapter, i) {
2496                 struct net_device *dev = adapter->port[i];
2497                 struct port_info *p = netdev_priv(dev);
2498
2499                 if (netif_running(dev)) {
2500                         spin_lock(&adapter->stats_lock);
2501                         t3_mac_update_stats(&p->mac);
2502                         spin_unlock(&adapter->stats_lock);
2503                 }
2504         }
2505 }
2506
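     /*
      * Periodic link maintenance: handle any latched link fault, and poll
      * for link changes on PHYs that lack a link-change interrupt.
      */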
2507 static void check_link_status(struct adapter *adapter)
2508 {
2509         int i;
2510
2511         for_each_port(adapter, i) {
2512                 struct net_device *dev = adapter->port[i];
2513                 struct port_info *p = netdev_priv(dev);
2514                 int link_fault;
2515
2516                 spin_lock_irq(&adapter->work_lock);
2517                 link_fault = p->link_fault;
2518                 spin_unlock_irq(&adapter->work_lock);
2519
2520                 if (link_fault) {
2521                         t3_link_fault(adapter, i);
2522                         continue;
2523                 }
2524
2525                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
2526                         t3_xgm_intr_disable(adapter, i);
2527                         t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2528
2529                         t3_link_changed(adapter, i);
2530                         t3_xgm_intr_enable(adapter, i);
2531                 }
2532         }
2533 }
2534
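     /*
      * T3B2 MAC watchdog: a status of 1 means the MAC was toggled to
      * recover it, a status of 2 means it had to be fully reset and
      * reprogrammed.
      */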
2535 static void check_t3b2_mac(struct adapter *adapter)
2536 {
2537         int i;
2538
2539         if (!rtnl_trylock())    /* synchronize with ifdown */
2540                 return;
2541
2542         for_each_port(adapter, i) {
2543                 struct net_device *dev = adapter->port[i];
2544                 struct port_info *p = netdev_priv(dev);
2545                 int status;
2546
2547                 if (!netif_running(dev))
2548                         continue;
2549
2550                 status = 0;
2551                 if (netif_carrier_ok(dev))
2552                         status = t3b2_mac_watchdog_task(&p->mac);
2553                 if (status == 1)
2554                         p->mac.stats.num_toggled++;
2555                 else if (status == 2) {
2556                         struct cmac *mac = &p->mac;
2557
2558                         t3_mac_set_mtu(mac, dev->mtu);
2559                         t3_mac_set_address(mac, 0, dev->dev_addr);
2560                         cxgb_set_rxmode(dev);
2561                         t3_link_start(&p->phy, mac, &p->link_config);
2562                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2563                         t3_port_intr_enable(adapter, p->port_id);
2564                         p->mac.stats.num_resets++;
2565                 }
2566         }
2567         rtnl_unlock();
2568 }
2569
2570
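     /*
      * Periodic maintenance work: checks link state, accumulates MAC stats,
      * runs the T3B2 MAC watchdog, clears interrupt conditions that are
      * handled by polling and counting, then reschedules itself while any
      * port is up.
      */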
2571 static void t3_adap_check_task(struct work_struct *work)
2572 {
2573         struct adapter *adapter = container_of(work, struct adapter,
2574                                                adap_check_task.work);
2575         const struct adapter_params *p = &adapter->params;
2576         int port;
2577         unsigned int v, status, reset;
2578
2579         adapter->check_task_cnt++;
2580
2581         check_link_status(adapter);
2582
2583         /* Accumulate MAC stats if needed */
2584         if (!p->linkpoll_period ||
2585             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2586             p->stats_update_period) {
2587                 mac_stats_update(adapter);
2588                 adapter->check_task_cnt = 0;
2589         }
2590
2591         if (p->rev == T3_REV_B2)
2592                 check_t3b2_mac(adapter);
2593
2594         /*
2595          * Scan the XGMACs for conditions we want to monitor by periodic
2596          * polling rather than via an interrupt.  Such conditions would
2597          * otherwise flood the system with interrupts when all we really
2598          * need to know is that they are occurring.  For each condition we
2599          * count every detection and then clear its cause bit so it can be
2600          * detected again on the next polling loop.
2601          */
2602         for_each_port(adapter, port) {
2603                 struct cmac *mac =  &adap2pinfo(adapter, port)->mac;
2604                 u32 cause;
2605
2606                 cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
2607                 reset = 0;
2608                 if (cause & F_RXFIFO_OVERFLOW) {
2609                         mac->stats.rx_fifo_ovfl++;
2610                         reset |= F_RXFIFO_OVERFLOW;
2611                 }
2612
2613                 t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
2614         }
2615
2616         /*
2617          * We do the same as above for FL_EMPTY interrupts.
2618          */
2619         status = t3_read_reg(adapter, A_SG_INT_CAUSE);
2620         reset = 0;
2621
2622         if (status & F_FLEMPTY) {
2623                 struct sge_qset *qs = &adapter->sge.qs[0];
2624                 int i = 0;
2625
2626                 reset |= F_FLEMPTY;
2627
2628                 v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
2629                     0xffff;
2630
2631                 while (v) {
2632                         qs->fl[i].empty += (v & 1);
2633                         if (i)
2634                                 qs++;
2635                         i ^= 1;
2636                         v >>= 1;
2637                 }
2638         }
2639
2640         t3_write_reg(adapter, A_SG_INT_CAUSE, reset);
2641
2642         /* Schedule the next check update if any port is active. */
2643         spin_lock_irq(&adapter->work_lock);
2644         if (adapter->open_device_map & PORT_MASK)
2645                 schedule_chk_task(adapter);
2646         spin_unlock_irq(&adapter->work_lock);
2647 }
2648
2649 /*
2650  * Processes external (PHY) interrupts in process context.
2651  */
2652 static void ext_intr_task(struct work_struct *work)
2653 {
2654         struct adapter *adapter = container_of(work, struct adapter,
2655                                                ext_intr_handler_task);
2656         int i;
2657
2658         /* Disable link fault interrupts */
2659         for_each_port(adapter, i) {
2660                 struct net_device *dev = adapter->port[i];
2661                 struct port_info *p = netdev_priv(dev);
2662
2663                 t3_xgm_intr_disable(adapter, i);
2664                 t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
2665         }
2666
2667         /* Re-enable link fault interrupts */
2668         t3_phy_intr_handler(adapter);
2669
2670         for_each_port(adapter, i)
2671                 t3_xgm_intr_enable(adapter, i);
2672
2673         /* Now reenable external interrupts */
2674         spin_lock_irq(&adapter->work_lock);
2675         if (adapter->slow_intr_mask) {
2676                 adapter->slow_intr_mask |= F_T3DBG;
2677                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2678                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2679                              adapter->slow_intr_mask);
2680         }
2681         spin_unlock_irq(&adapter->work_lock);
2682 }
2683
2684 /*
2685  * Interrupt-context handler for external (PHY) interrupts.
2686  */
2687 void t3_os_ext_intr_handler(struct adapter *adapter)
2688 {
2689         /*
2690          * Schedule a task to handle external interrupts as they may be slow
2691          * and we use a mutex to protect MDIO registers.  We disable PHY
2692          * interrupts in the meantime and let the task reenable them when
2693          * it's done.
2694          */
2695         spin_lock(&adapter->work_lock);
2696         if (adapter->slow_intr_mask) {
2697                 adapter->slow_intr_mask &= ~F_T3DBG;
2698                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2699                              adapter->slow_intr_mask);
2700                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2701         }
2702         spin_unlock(&adapter->work_lock);
2703 }
2704
2705 void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
2706 {
2707         struct net_device *netdev = adapter->port[port_id];
2708         struct port_info *pi = netdev_priv(netdev);
2709
2710         spin_lock(&adapter->work_lock);
2711         pi->link_fault = 1;
2712         spin_unlock(&adapter->work_lock);
2713 }
2714
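     /*
      * Quiesce the adapter after an error: notify and close the offload
      * device, stop all ports and the SGE timers, optionally reset the
      * chip, and disable the PCI device.
      */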
2715 static int t3_adapter_error(struct adapter *adapter, int reset)
2716 {
2717         int i, ret = 0;
2718
2719         if (is_offload(adapter) &&
2720             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2721                 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
2722                 offload_close(&adapter->tdev);
2723         }
2724
2725         /* Stop all ports */
2726         for_each_port(adapter, i) {
2727                 struct net_device *netdev = adapter->port[i];
2728
2729                 if (netif_running(netdev))
2730                         cxgb_close(netdev);
2731         }
2732
2733         /* Stop SGE timers */
2734         t3_stop_sge_timers(adapter);
2735
2736         adapter->flags &= ~FULL_INIT_DONE;
2737
2738         if (reset)
2739                 ret = t3_reset_adapter(adapter);
2740
2741         pci_disable_device(adapter->pdev);
2742
2743         return ret;
2744 }
2745
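     /*
      * Bring the adapter back after an error: re-enable and restore the PCI
      * device, free stale SGE resources, and prepare the chip for replay.
      */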
2746 static int t3_reenable_adapter(struct adapter *adapter)
2747 {
2748         if (pci_enable_device(adapter->pdev)) {
2749                 dev_err(&adapter->pdev->dev,
2750                         "Cannot re-enable PCI device after reset.\n");
2751                 goto err;
2752         }
2753         pci_set_master(adapter->pdev);
2754         pci_restore_state(adapter->pdev);
2755
2756         /* Free sge resources */
2757         t3_free_sge_resources(adapter);
2758
2759         if (t3_replay_prep_adapter(adapter))
2760                 goto err;
2761
2762         return 0;
2763 err:
2764         return -1;
2765 }
2766
2767 static void t3_resume_ports(struct adapter *adapter)
2768 {
2769         int i;
2770
2771         /* Restart the ports */
2772         for_each_port(adapter, i) {
2773                 struct net_device *netdev = adapter->port[i];
2774
2775                 if (netif_running(netdev)) {
2776                         if (cxgb_open(netdev)) {
2777                                 dev_err(&adapter->pdev->dev,
2778                                         "can't bring device back up"
2779                                         " after reset\n");
2780                                 continue;
2781                         }
2782                 }
2783         }
2784
2785         if (is_offload(adapter) && !ofld_disable)
2786                 cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
2787 }
2788
2789 /*
2790  * Processes a fatal error:
2791  * bring the ports down, reset the chip, then bring the ports back up.
2792  */
2793 static void fatal_error_task(struct work_struct *work)
2794 {
2795         struct adapter *adapter = container_of(work, struct adapter,
2796                                                fatal_error_handler_task);
2797         int err = 0;
2798
2799         rtnl_lock();
2800         err = t3_adapter_error(adapter, 1);
2801         if (!err)
2802                 err = t3_reenable_adapter(adapter);
2803         if (!err)
2804                 t3_resume_ports(adapter);
2805
2806         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2807         rtnl_unlock();
2808 }
2809
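     /*
      * Handle a fatal hardware error: stop the SGE and both MACs, disable
      * interrupts, and defer the reset and recovery to fatal_error_task.
      */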
2810 void t3_fatal_err(struct adapter *adapter)
2811 {
2812         unsigned int fw_status[4];
2813
2814         if (adapter->flags & FULL_INIT_DONE) {
2815                 t3_sge_stop(adapter);
2816                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2817                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2818                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2819                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2820
2821                 spin_lock(&adapter->work_lock);
2822                 t3_intr_disable(adapter);
2823                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2824                 spin_unlock(&adapter->work_lock);
2825         }
2826         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2827         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2828                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2829                          fw_status[0], fw_status[1],
2830                          fw_status[2], fw_status[3]);
2831 }
2832
2833 /**
2834  * t3_io_error_detected - called when PCI error is detected
2835  * @pdev: Pointer to PCI device
2836  * @state: The current pci connection state
2837  *
2838  * This function is called after a PCI bus error affecting
2839  * this device has been detected.
2840  */
2841 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2842                                              pci_channel_state_t state)
2843 {
2844         struct adapter *adapter = pci_get_drvdata(pdev);
2846
2847         if (state == pci_channel_io_perm_failure)
2848                 return PCI_ERS_RESULT_DISCONNECT;
2849
2850         t3_adapter_error(adapter, 0);
2851
2852         /* Request a slot reset. */
2853         return PCI_ERS_RESULT_NEED_RESET;
2854 }
2855
2856 /**
2857  * t3_io_slot_reset - called after the pci bus has been reset.
2858  * @pdev: Pointer to PCI device
2859  *
2860  * Restart the card from scratch, as if from a cold boot.
2861  */
2862 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2863 {
2864         struct adapter *adapter = pci_get_drvdata(pdev);
2865
2866         if (!t3_reenable_adapter(adapter))
2867                 return PCI_ERS_RESULT_RECOVERED;
2868
2869         return PCI_ERS_RESULT_DISCONNECT;
2870 }
2871
2872 /**
2873  * t3_io_resume - called when traffic can start flowing again.
2874  * @pdev: Pointer to PCI device
2875  *
2876  * This callback is called when the error recovery driver tells us that
2877  * it's OK to resume normal operation.
2878  */
2879 static void t3_io_resume(struct pci_dev *pdev)
2880 {
2881         struct adapter *adapter = pci_get_drvdata(pdev);
2882
2883         CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
2884                  t3_read_reg(adapter, A_PCIE_PEX_ERR));
2885
2886         t3_resume_ports(adapter);
2887 }
2888
2889 static struct pci_error_handlers t3_err_handler = {
2890         .error_detected = t3_io_error_detected,
2891         .slot_reset = t3_io_slot_reset,
2892         .resume = t3_io_resume,
2893 };
2894
2895 /*
2896  * Set the number of qsets based on the number of CPUs and the number of ports,
2897  * not to exceed the number of available qsets, assuming there are enough qsets
2898  * per port in HW.
2899  */
2900 static void set_nqsets(struct adapter *adap)
2901 {
2902         int i, j = 0;
2903         int num_cpus = num_online_cpus();
2904         int hwports = adap->params.nports;
2905         int nqsets = adap->msix_nvectors - 1;
2906
2907         if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2908                 if (hwports == 2 &&
2909                     (hwports * nqsets > SGE_QSETS ||
2910                      num_cpus >= nqsets / hwports))
2911                         nqsets /= hwports;
2912                 if (nqsets > num_cpus)
2913                         nqsets = num_cpus;
2914                 if (nqsets < 1 || hwports == 4)
2915                         nqsets = 1;
2916         } else
2917                 nqsets = 1;
2918
2919         for_each_port(adap, i) {
2920                 struct port_info *pi = adap2pinfo(adap, i);
2921
2922                 pi->first_qset = j;
2923                 pi->nqsets = nqsets;
2924                 j = pi->first_qset + nqsets;
2925
2926                 dev_info(&adap->pdev->dev,
2927                          "Port %d using %d queue sets.\n", i, nqsets);
2928         }
2929 }
2930
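     /*
      * Ask for one MSI-X vector per queue set plus one for slow-path
      * interrupts, retrying with whatever smaller count the system offers,
      * but insisting on at least one vector per port plus the extra one.
      */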
2931 static int __devinit cxgb_enable_msix(struct adapter *adap)
2932 {
2933         struct msix_entry entries[SGE_QSETS + 1];
2934         int vectors;
2935         int i, err;
2936
2937         vectors = ARRAY_SIZE(entries);
2938         for (i = 0; i < vectors; ++i)
2939                 entries[i].entry = i;
2940
2941         while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
2942                 vectors = err;
2943
2944         if (err < 0)
2945                 pci_disable_msix(adap->pdev);
2946
2947         if (!err && vectors < (adap->params.nports + 1)) {
2948                 pci_disable_msix(adap->pdev);
2949                 err = -1;
2950         }
2951
2952         if (!err) {
2953                 for (i = 0; i < vectors; ++i)
2954                         adap->msix_info[i].vec = entries[i].vector;
2955                 adap->msix_nvectors = vectors;
2956         }
2957
2958         return err;
2959 }
2960
2961 static void __devinit print_port_info(struct adapter *adap,
2962                                       const struct adapter_info *ai)
2963 {
2964         static const char *pci_variant[] = {
2965                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2966         };
2967
2968         int i;
2969         char buf[80];
2970
2971         if (is_pcie(adap))
2972                 snprintf(buf, sizeof(buf), "%s x%d",
2973                          pci_variant[adap->params.pci.variant],
2974                          adap->params.pci.width);
2975         else
2976                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2977                          pci_variant[adap->params.pci.variant],
2978                          adap->params.pci.speed, adap->params.pci.width);
2979
2980         for_each_port(adap, i) {
2981                 struct net_device *dev = adap->port[i];
2982                 const struct port_info *pi = netdev_priv(dev);
2983
2984                 if (!test_bit(i, &adap->registered_device_map))
2985                         continue;
2986                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2987                        dev->name, ai->desc, pi->phy.desc,
2988                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
2989                        (adap->flags & USING_MSIX) ? " MSI-X" :
2990                        (adap->flags & USING_MSI) ? " MSI" : "");
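                /*
                 * adap->name aliases the name of the first successfully
                 * registered netdev (set in init_one), so the memory
                 * summary below is printed only once per adapter.
                 */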
2991                 if (adap->name == dev->name && adap->params.vpd.mclk)
2992                         printk(KERN_INFO
2993                                "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2994                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2995                                t3_mc7_size(&adap->pmtx) >> 20,
2996                                t3_mc7_size(&adap->pmrx) >> 20,
2997                                adap->params.vpd.sn);
2998         }
2999 }
3000
3001 static const struct net_device_ops cxgb_netdev_ops = {
3002         .ndo_open               = cxgb_open,
3003         .ndo_stop               = cxgb_close,
3004         .ndo_start_xmit         = t3_eth_xmit,
3005         .ndo_get_stats          = cxgb_get_stats,
3006         .ndo_validate_addr      = eth_validate_addr,
3007         .ndo_set_multicast_list = cxgb_set_rxmode,
3008         .ndo_do_ioctl           = cxgb_ioctl,
3009         .ndo_change_mtu         = cxgb_change_mtu,
3010         .ndo_set_mac_address    = cxgb_set_mac_addr,
3011         .ndo_vlan_rx_register   = vlan_rx_register,
3012 #ifdef CONFIG_NET_POLL_CONTROLLER
3013         .ndo_poll_controller    = cxgb_netpoll,
3014 #endif
3015 };
3016
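/*
 * init_one - probe one Chelsio T3 PCI function.
 *
 * Order of operations: claim the BARs, enable the device, negotiate the
 * DMA mask, map BAR0, allocate and wire up one net_device per port,
 * prepare the adapter, register the netdevs, then pick an interrupt
 * scheme (MSI-X, MSI or legacy INTx) and distribute the queue sets.
 * The error paths at the bottom unwind in exactly the reverse order.
 */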
3017 static int __devinit init_one(struct pci_dev *pdev,
3018                               const struct pci_device_id *ent)
3019 {
3020         static int version_printed;
3021
3022         int i, err, pci_using_dac = 0;
3023         resource_size_t mmio_start, mmio_len;
3024         const struct adapter_info *ai;
3025         struct adapter *adapter = NULL;
3026         struct port_info *pi;
3027
3028         if (!version_printed) {
3029                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
3030                 ++version_printed;
3031         }
3032
3033         if (!cxgb3_wq) {
3034                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
3035                 if (!cxgb3_wq) {
3036                         printk(KERN_ERR DRV_NAME
3037                                ": cannot initialize work queue\n");
3038                         return -ENOMEM;
3039                 }
3040         }
3041
3042         err = pci_request_regions(pdev, DRV_NAME);
3043         if (err) {
3044                 /* Just info, some other driver may have claimed the device. */
3045                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
3046                 return err;
3047         }
3048
3049         err = pci_enable_device(pdev);
3050         if (err) {
3051                 dev_err(&pdev->dev, "cannot enable PCI device\n");
3052                 goto out_release_regions;
3053         }
3054
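        /*
         * Prefer a 64-bit DMA mask and fall back to 32 bits if the
         * platform cannot do better.  pci_using_dac is remembered so
         * that NETIF_F_HIGHDMA can be advertised on the netdevs below.
         */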
3055         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
3056                 pci_using_dac = 1;
3057                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3058                 if (err) {
3059                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
3060                                "coherent allocations\n");
3061                         goto out_disable_device;
3062                 }
3063         } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3064                 dev_err(&pdev->dev, "no usable DMA configuration\n");
3065                 goto out_disable_device;
3066         }
3067
3068         pci_set_master(pdev);
3069         pci_save_state(pdev);
3070
3071         mmio_start = pci_resource_start(pdev, 0);
3072         mmio_len = pci_resource_len(pdev, 0);
3073         ai = t3_get_adapter_info(ent->driver_data);
3074
3075         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3076         if (!adapter) {
3077                 err = -ENOMEM;
3078                 goto out_disable_device;
3079         }
3080
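        /*
         * Pre-allocate an skb large enough for a CPL_SET_TCB_FIELD
         * message.  It is kept as an emergency fallback for the offload
         * code on paths where an skb allocation must not fail.
         */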
3081         adapter->nofail_skb =
3082                 alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
3083         if (!adapter->nofail_skb) {
3084                 dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
3085                 err = -ENOMEM;
3086                 goto out_free_adapter;
3087         }
3088
3089         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
3090         if (!adapter->regs) {
3091                 dev_err(&pdev->dev, "cannot map device registers\n");
3092                 err = -ENOMEM;
3093                 goto out_free_adapter;
3094         }
3095
3096         adapter->pdev = pdev;
3097         adapter->name = pci_name(pdev);
3098         adapter->msg_enable = dflt_msg_enable;
3099         adapter->mmio_len = mmio_len;
3100
3101         mutex_init(&adapter->mdio_lock);
3102         spin_lock_init(&adapter->work_lock);
3103         spin_lock_init(&adapter->stats_lock);
3104
3105         INIT_LIST_HEAD(&adapter->adapter_list);
3106         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
3107         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
3108         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
3109
3110         for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
3111                 struct net_device *netdev;
3112
3113                 netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
3114                 if (!netdev) {
3115                         err = -ENOMEM;
3116                         goto out_free_dev;
3117                 }
3118
3119                 SET_NETDEV_DEV(netdev, &pdev->dev);
3120
3121                 adapter->port[i] = netdev;
3122                 pi = netdev_priv(netdev);
3123                 pi->adapter = adapter;
3124                 pi->rx_offload = T3_RX_CSUM | T3_LRO;
3125                 pi->port_id = i;
3126                 netif_carrier_off(netdev);
3127                 netif_tx_stop_all_queues(netdev);
3128                 netdev->irq = pdev->irq;
3129                 netdev->mem_start = mmio_start;
3130                 netdev->mem_end = mmio_start + mmio_len - 1;
3131                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3132                 netdev->features |= NETIF_F_GRO;
3133                 if (pci_using_dac)
3134                         netdev->features |= NETIF_F_HIGHDMA;
3135
3136                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3137                 netdev->netdev_ops = &cxgb_netdev_ops;
3138                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
3139         }
3140
3141         pci_set_drvdata(pdev, adapter);
3142         if (t3_prep_adapter(adapter, ai, 1) < 0) {
3143                 err = -ENODEV;
3144                 goto out_free_dev;
3145         }
3146
3147         /*
3148          * The card is now ready to go.  If any errors occur during device
3149          * registration, we do not fail the whole card; instead we proceed
3150          * only with the ports we manage to register successfully.  However,
3151          * we must register at least one net device.
3152          */
3153         for_each_port(adapter, i) {
3154                 err = register_netdev(adapter->port[i]);
3155                 if (err)
3156                         dev_warn(&pdev->dev,
3157                                  "cannot register net device %s, skipping\n",
3158                                  adapter->port[i]->name);
3159                 else {
3160                         /*
3161                          * Change the name we use for messages to the name of
3162                          * the first successfully registered interface.
3163                          */
3164                         if (!adapter->registered_device_map)
3165                                 adapter->name = adapter->port[i]->name;
3166
3167                         __set_bit(i, &adapter->registered_device_map);
3168                 }
3169         }
3170         if (!adapter->registered_device_map) {
3171                 dev_err(&pdev->dev, "could not register any net devices\n");
3172                 goto out_free_dev;
3173         }
3174
3175         /* Driver is ready; reflect it on the LEDs. */
3176         t3_led_ready(adapter);
3177
3178         if (is_offload(adapter)) {
3179                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
3180                 cxgb3_adapter_ofld(adapter);
3181         }
3182
3183         /* See what interrupts we'll be using */
3184         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
3185                 adapter->flags |= USING_MSIX;
3186         else if (msi > 0 && pci_enable_msi(pdev) == 0)
3187                 adapter->flags |= USING_MSI;
3188
3189         set_nqsets(adapter);
3190
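        /*
         * Expose the driver's attribute group under the first port's
         * sysfs directory.  A failure here costs only the attribute
         * files, so it does not fail the probe.
         */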
3191         err = sysfs_create_group(&adapter->port[0]->dev.kobj,
3192                                  &cxgb3_attr_group);
3193
3194         print_port_info(adapter, ai);
3195         return 0;
3196
3197 out_free_dev:
3198         iounmap(adapter->regs);
3199         for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
3200                 if (adapter->port[i])
3201                         free_netdev(adapter->port[i]);
3202
3203 out_free_adapter:
3204         kfree(adapter);
3205
3206 out_disable_device:
3207         pci_disable_device(pdev);
3208 out_release_regions:
3209         pci_release_regions(pdev);
3210         pci_set_drvdata(pdev, NULL);
3211         return err;
3212 }
3213
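/*
 * remove_one - detach and free one adapter.
 *
 * Teardown mirrors init_one in reverse: quiesce the SGE, remove the
 * sysfs group and any offload state, unregister the netdevs, release
 * SGE and interrupt resources, free the netdevs, and finally unmap the
 * registers and release the PCI device.
 */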
3214 static void __devexit remove_one(struct pci_dev *pdev)
3215 {
3216         struct adapter *adapter = pci_get_drvdata(pdev);
3217
3218         if (adapter) {
3219                 int i;
3220
3221                 t3_sge_stop(adapter);
3222                 sysfs_remove_group(&adapter->port[0]->dev.kobj,
3223                                    &cxgb3_attr_group);
3224
3225                 if (is_offload(adapter)) {
3226                         cxgb3_adapter_unofld(adapter);
3227                         if (test_bit(OFFLOAD_DEVMAP_BIT,
3228                                      &adapter->open_device_map))
3229                                 offload_close(&adapter->tdev);
3230                 }
3231
3232                 for_each_port(adapter, i)
3233                         if (test_bit(i, &adapter->registered_device_map))
3234                                 unregister_netdev(adapter->port[i]);
3235
3236                 t3_stop_sge_timers(adapter);
3237                 t3_free_sge_resources(adapter);
3238                 cxgb_disable_msi(adapter);
3239
3240                 for_each_port(adapter, i)
3241                         if (adapter->port[i])
3242                                 free_netdev(adapter->port[i]);
3243
3244                 iounmap(adapter->regs);
3245                 if (adapter->nofail_skb)
3246                         kfree_skb(adapter->nofail_skb);
3247                 kfree(adapter);
3248                 pci_release_regions(pdev);
3249                 pci_disable_device(pdev);
3250                 pci_set_drvdata(pdev, NULL);
3251         }
3252 }
3253
3254 static struct pci_driver driver = {
3255         .name = DRV_NAME,
3256         .id_table = cxgb3_pci_tbl,
3257         .probe = init_one,
3258         .remove = __devexit_p(remove_one),
3259         .err_handler = &t3_err_handler,
3260 };
3261
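/*
 * Module init/exit.  Note the asymmetry around the workqueue: cxgb3_wq
 * is created lazily by the first probe in init_one, but destroyed only
 * at module unload in cxgb3_cleanup_module.
 */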
3262 static int __init cxgb3_init_module(void)
3263 {
3264         int ret;
3265
3266         cxgb3_offload_init();
3267
3268         ret = pci_register_driver(&driver);
3269         return ret;
3270 }
3271
3272 static void __exit cxgb3_cleanup_module(void)
3273 {
3274         pci_unregister_driver(&driver);
3275         if (cxgb3_wq)
3276                 destroy_workqueue(cxgb3_wq);
3277 }
3278
3279 module_init(cxgb3_init_module);
3280 module_exit(cxgb3_cleanup_module);