/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define to_net_dev(class) container_of(class, struct net_device, class_dev)

#define CH_DEVICE(devid, ssid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 1, 0),  /* PE9000 */
        CH_DEVICE(0x21, 1, 1),  /* T302E */
        CH_DEVICE(0x22, 1, 2),  /* T310E */
        CH_DEVICE(0x23, 1, 3),  /* T320X */
        CH_DEVICE(0x24, 1, 1),  /* T302X */
        CH_DEVICE(0x25, 1, 3),  /* T320E */
        CH_DEVICE(0x26, 1, 2),  /* T310X */
        CH_DEVICE(0x30, 1, 2),  /* T3B10 */
        CH_DEVICE(0x31, 1, 3),  /* T3B20 */
        CH_DEVICE(0x32, 1, 1),  /* T3B02 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
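
/*
 * Example (illustrative): loading the module with "modprobe cxgb3 msi=1"
 * keeps the driver off MSI-X and lets it fall back to MSI or legacy pin
 * interrupts.
 */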

/*
 * The driver enables offload by default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the port whose link state is to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);
                link_report(dev);
        }
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;
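
        /*
         * Vector 0 carries the adapter-wide slow-path interrupt; each data
         * vector below is named after its port and queue index, giving
         * names such as "eth0 (queue 0)" (interface name illustrative).
         */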
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s (queue %d)", d->name, i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }
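
        /*
         * Example: with nq0 = 2 and nq1 = 2, the lower half of the table
         * cycles through qsets 0,1,0,1,... for port 0 and the upper half
         * through qsets 2,3,2,3,... for port 1.
         */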

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}

/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices.  We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues.  Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
        int i, j, dummy_idx = 0;
        struct net_device *nd;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets - 1; j++) {
                        if (!adap->dummy_netdev[dummy_idx]) {
                                nd = alloc_netdev(0, "", ether_setup);
                                if (!nd)
                                        goto free_all;

                                nd->priv = adap;
                                nd->weight = 64;
                                set_bit(__LINK_STATE_START, &nd->state);
                                adap->dummy_netdev[dummy_idx] = nd;
                        }
                        strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
                        dummy_idx++;
                }
        }
        return 0;

free_all:
        while (--dummy_idx >= 0) {
                free_netdev(adap->dummy_netdev[dummy_idx]);
                adap->dummy_netdev[dummy_idx] = NULL;
        }
        return -ENOMEM;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;
        struct net_device *dev;

        for_each_port(adap, i) {
                dev = adap->port[i];
                while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                        msleep(1);
        }

        for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
                dev = adap->dummy_netdev[i];
                if (dev)
                        while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                                msleep(1);
        }
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
        unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq,
                                j == 0 ? dev :
                                         adap->dummy_netdev[dummy_dev_idx++]);
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct class_device *cd, char *buf,
                         ssize_t(*format) (struct adapter *, char *))
{
        ssize_t len;
        struct adapter *adap = to_net_dev(cd)->priv;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (adap, buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct class_device *cd, const char *buf, size_t len,
                          ssize_t(*set) (struct adapter *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;
        struct adapter *adap = to_net_dev(cd)->priv;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (adap, val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct adapter *adap, char *buf) \
{ \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct class_device *cd, char *buf) \
{ \
        return attr_show(cd, buf, format_##name); \
}

static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
{
        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct class_device *cd, const char *buf,
                              size_t len)
{
        return attr_store(cd, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct adapter *adap, unsigned int val)
{
        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct class_device *cd, const char *buf,
                              size_t len)
{
        return attr_store(cd, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &class_device_attr_cam_size.attr,
        &class_device_attr_nfilters.attr,
        &class_device_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched)
{
        ssize_t len;
        unsigned int v, addr, bpt, cpt;
        struct adapter *adap = to_net_dev(cd)->priv;

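        /*
         * Two schedulers share each 32-bit rate-limit register, so the
         * register address steps down by one for every pair and the odd
         * scheduler's fields sit in the upper half-word (see the shift
         * below).
         */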
        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
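                /*
                 * The scheduler emits bpt bytes every cpt core-clock ticks.
                 * v below is emissions per second (assuming cclk is in kHz),
                 * so v * bpt is bytes/sec, and dividing by 125 converts that
                 * to Kbps (1 Kbps = 125 bytes/sec).
                 */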
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct class_device *cd, const char *buf,
                             size_t len, int sched)
{
        char *endp;
        ssize_t ret;
        unsigned int val;
        struct adapter *adap = to_net_dev(cd)->priv;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct class_device *cd, char *buf) \
{ \
        return tm_attr_show(cd, buf, sched); \
} \
static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \
{ \
        return tm_attr_store(cd, buf, len, sched); \
} \
static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &class_device_attr_sched0.attr,
        &class_device_attr_sched1.attr,
        &class_device_attr_sched2.attr,
        &class_device_attr_sched3.attr,
        &class_device_attr_sched4.attr,
        &class_device_attr_sched5.attr,
        &class_device_attr_sched6.attr,
        &class_device_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        t3_mgmt_tx(adap, skb);
}

static void bind_qsets(struct adapter *adap)
{
        int i, j;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j)
                        send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
                                          -1, i);
        }
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err = 0;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err)
                        goto out;

                err = init_dummy_netdevs(adap);
                if (err)
                        goto out;

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                if (request_msix_data_irqs(adap)) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
                                      adap->name, adap)))
                goto irq_err;

        t3_sge_start(adap);
        t3_intr_enable(adap);

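        /*
         * Queue sets are bound to the packet scheduler only in the MSI-X
         * case and only once; QUEUES_BOUND is never cleared here, so later
         * cxgb_up() calls skip the binding.
         */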
        if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
                bind_qsets(adap);
        adap->flags |= QUEUES_BOUND;

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                    n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);

        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
        quiesce_rx(adapter);
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

static int offload_open(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct t3cdev *tdev = T3CDEV(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err = 0;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                return err;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        /* Never mind if the next step fails */
        sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        int err;
        struct adapter *adapter = dev->priv;
        struct port_info *pi = netdev_priv(dev);
        int other_ports = adapter->open_device_map & PORT_MASK;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (!ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_start_queue(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = netdev_priv(dev);

        t3_port_intr_disable(adapter, p->port_id);
        netif_stop_queue(dev);
        p->phy.ops->power_down(&p->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock(&adapter->work_lock); /* sync with update task */
        clear_bit(p->port_id, &adapter->open_device_map);
        spin_unlock(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_rearming_delayed_workqueue(cxgb3_wq,
                                                  &adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = netdev_priv(dev);
        struct net_device_stats *ns = &p->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&p->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct adapter *adapter = dev->priv;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "RxDrops            "
};

static int get_stats_count(struct net_device *dev)
{
        return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        u32 fw_vers = 0;
        struct adapter *adapter = dev->priv;

        t3_get_fw_version(adapter, &fw_vers);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = 0; i < p->nqsets; ++i)
                tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct adapter *adapter = dev->priv;
        struct port_info *pi = netdev_priv(dev);
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

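        /* The order of the values below must match stats_strings[] above. */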
        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = s->rx_cong_drops;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;
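
        /*
         * A register's address within the chip doubles as its offset into
         * the snapshot buffer, so ranges that are never dumped stay
         * zero-filled (get_regs() clears the buffer first).
         */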
        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct adapter *ap = dev->priv;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

        /*
         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation.  Hard to justify the complexity.
         */
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_config.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        p->phy.ops->autoneg_restart(&p->phy);
        return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
        int i;
        struct adapter *adapter = dev->priv;

        if (data == 0)
                data = 2;

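        /*
         * Toggle GPIO0 every 500 ms, blinking the attached LED for roughly
         * "data" seconds (two half-periods per second), then drive GPIO0
         * back high.
         */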
        for (i = 0; i < data * 2; i++) {
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                                 (i & 1) ? F_GPIO0_OUT_VAL : 0);
                if (msleep_interruptible(500))
                        break;
        }
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         F_GPIO0_OUT_VAL);
        return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy.addr;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;     /* can't change speed/duplex */

        if (cmd->autoneg == AUTONEG_DISABLE) {
                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
                        return -EINVAL;
                lc->requested_speed = cmd->speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t3_link_start(&p->phy, &p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t3_link_start(&p->phy, &p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
        }
        return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct port_info *p = netdev_priv(dev);

        p->rx_csum_offload = data;
        return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct adapter *adapter = dev->priv;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = 0;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = adapter->params.sge.qset[0].fl_size;
        e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
        e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
        e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        int i;
        struct adapter *adapter = dev->priv;

        if (e->rx_pending > MAX_RX_BUFFERS ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        for (i = 0; i < SGE_QSETS; ++i) {
                struct qset_params *q = &adapter->params.sge.qset[i];

                q->rspq_size = e->rx_mini_pending;
                q->fl_size = e->rx_pending;
                q->jumbo_size = e->rx_jumbo_pending;
                q->txq_size[0] = e->tx_pending;
                q->txq_size[1] = e->tx_pending;
                q->txq_size[2] = e->tx_pending;
        }
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;
        struct qset_params *qsp = &adapter->params.sge.qset[0];
        struct sge_qset *qs = &adapter->sge.qs[0];

        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
                return -EINVAL;

        qsp->coalesce_usecs = c->rx_coalesce_usecs;
        t3_update_qset_coalesce(qs, qsp);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;
        struct qset_params *q = adapter->params.sge.qset;

        c->rx_coalesce_usecs = q->coalesce_usecs;
        return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 * data)
{
        int i, err = 0;
        struct adapter *adapter = dev->priv;

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
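        /*
         * The EEPROM is read in 32-bit words, so start at the word-aligned
         * offset at or below the requested one, then copy out just the
         * bytes the caller asked for.
         */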
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 * data)
{
        u8 *buf;
        int err = 0;
        u32 aligned_offset, aligned_len, *p;
        struct adapter *adapter = dev->priv;

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
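
        /*
         * Round the write window out to whole 32-bit words: for example,
         * offset 5 with length 6 (bytes 5..10) becomes aligned_offset 4 and
         * aligned_len 8, covering bytes 4..11.  Partially covered words at
         * either end are read back first so their untouched bytes survive.
         */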
        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                buf = kmalloc(aligned_len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
                if (!err && aligned_len > 4)
                        err = t3_seeprom_read(adapter,
                                              aligned_offset + aligned_len - 4,
                                              (u32 *) & buf[aligned_len - 4]);
                if (err)
                        goto out;
                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
        } else
                buf = data;

        err = t3_seeprom_wp(adapter, 0);
        if (err)
                goto out;

        for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = t3_seeprom_write(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t3_seeprom_wp(adapter, 1);
out:
        if (buf != data)
                kfree(buf);
        return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        wol->supported = 0;
        wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_settings = get_settings,
        .set_settings = set_settings,
        .get_drvinfo = get_drvinfo,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
        .get_ringparam = get_sge_param,
        .set_ringparam = set_sge_param,
        .get_coalesce = get_coalesce,
        .set_coalesce = set_coalesce,
        .get_eeprom_len = get_eeprom_len,
        .get_eeprom = get_eeprom,
        .set_eeprom = set_eeprom,
        .get_pauseparam = get_pauseparam,
        .set_pauseparam = set_pauseparam,
        .get_rx_csum = get_rx_csum,
        .set_rx_csum = set_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = ethtool_op_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
        .get_link = ethtool_op_get_link,
        .get_strings = get_strings,
        .phys_id = cxgb3_phys_id,
        .nway_reset = restart_autoneg,
        .get_stats_count = get_stats_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len = get_regs_len,
        .get_regs = get_regs,
        .get_wol = get_wol,
        .get_tso = ethtool_op_get_tso,
        .set_tso = ethtool_op_set_tso,
        .get_perm_addr = ethtool_op_get_perm_addr
};

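/*
 * A negative value passed in through the extension ioctl means "leave this
 * parameter unchanged", so it always satisfies the range check; the ioctl
 * handler below only applies fields that are >= 0.
 */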
1532 static int in_range(int val, int lo, int hi)
1533 {
1534         return val < 0 || (val <= hi && val >= lo);
1535 }
1536
1537 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1538 {
1539         int ret;
1540         u32 cmd;
1541         struct adapter *adapter = dev->priv;
1542
1543         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1544                 return -EFAULT;
1545
1546         switch (cmd) {
1547         case CHELSIO_SETREG:{
1548                 struct ch_reg edata;
1549
1550                 if (!capable(CAP_NET_ADMIN))
1551                         return -EPERM;
1552                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1553                         return -EFAULT;
                if ((edata.addr & 3) != 0 ||
                    edata.addr >= adapter->mmio_len)
                        return -EINVAL;
1557                 writel(edata.val, adapter->regs + edata.addr);
1558                 break;
1559         }
1560         case CHELSIO_GETREG:{
1561                 struct ch_reg edata;
1562
1563                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1564                         return -EFAULT;
                if ((edata.addr & 3) != 0 ||
                    edata.addr >= adapter->mmio_len)
                        return -EINVAL;
1568                 edata.val = readl(adapter->regs + edata.addr);
1569                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1570                         return -EFAULT;
1571                 break;
1572         }
1573         case CHELSIO_SET_QSET_PARAMS:{
1574                 int i;
1575                 struct qset_params *q;
1576                 struct ch_qset_params t;
1577
1578                 if (!capable(CAP_NET_ADMIN))
1579                         return -EPERM;
1580                 if (copy_from_user(&t, useraddr, sizeof(t)))
1581                         return -EFAULT;
1582                 if (t.qset_idx >= SGE_QSETS)
1583                         return -EINVAL;
                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
                    !in_range(t.cong_thres, 0, 255) ||
                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
                              MAX_CTRL_TXQ_ENTRIES) ||
                    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
                              MAX_RX_BUFFERS) ||
                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
                              MAX_RX_JUMBO_BUFFERS) ||
                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
                              MAX_RSPQ_ENTRIES))
                        return -EINVAL;
1599                 if ((adapter->flags & FULL_INIT_DONE) &&
1600                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1601                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1602                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1603                         t.polling >= 0 || t.cong_thres >= 0))
1604                         return -EBUSY;
1605
1606                 q = &adapter->params.sge.qset[t.qset_idx];
1607
1608                 if (t.rspq_size >= 0)
1609                         q->rspq_size = t.rspq_size;
1610                 if (t.fl_size[0] >= 0)
1611                         q->fl_size = t.fl_size[0];
1612                 if (t.fl_size[1] >= 0)
1613                         q->jumbo_size = t.fl_size[1];
1614                 if (t.txq_size[0] >= 0)
1615                         q->txq_size[0] = t.txq_size[0];
1616                 if (t.txq_size[1] >= 0)
1617                         q->txq_size[1] = t.txq_size[1];
1618                 if (t.txq_size[2] >= 0)
1619                         q->txq_size[2] = t.txq_size[2];
1620                 if (t.cong_thres >= 0)
1621                         q->cong_thres = t.cong_thres;
1622                 if (t.intr_lat >= 0) {
                        struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
1625
1626                         q->coalesce_usecs = t.intr_lat;
1627                         t3_update_qset_coalesce(qs, q);
1628                 }
                if (t.polling >= 0) {
                        if (adapter->flags & USING_MSIX) {
                                q->polling = t.polling;
                        } else {
                                /* No polling with INTx for T3A */
                                if (adapter->params.rev == 0 &&
                                    !(adapter->flags & USING_MSI))
                                        t.polling = 0;

                                for (i = 0; i < SGE_QSETS; i++) {
                                        q = &adapter->params.sge.qset[i];
                                        q->polling = t.polling;
                                }
                        }
                }
1645                 break;
1646         }
1647         case CHELSIO_GET_QSET_PARAMS:{
1648                 struct qset_params *q;
1649                 struct ch_qset_params t;
1650
1651                 if (copy_from_user(&t, useraddr, sizeof(t)))
1652                         return -EFAULT;
1653                 if (t.qset_idx >= SGE_QSETS)
1654                         return -EINVAL;
1655
1656                 q = &adapter->params.sge.qset[t.qset_idx];
1657                 t.rspq_size = q->rspq_size;
1658                 t.txq_size[0] = q->txq_size[0];
1659                 t.txq_size[1] = q->txq_size[1];
1660                 t.txq_size[2] = q->txq_size[2];
1661                 t.fl_size[0] = q->fl_size;
1662                 t.fl_size[1] = q->jumbo_size;
1663                 t.polling = q->polling;
1664                 t.intr_lat = q->coalesce_usecs;
1665                 t.cong_thres = q->cong_thres;
1666
1667                 if (copy_to_user(useraddr, &t, sizeof(t)))
1668                         return -EFAULT;
1669                 break;
1670         }
1671         case CHELSIO_SET_QSET_NUM:{
1672                 struct ch_reg edata;
1673                 struct port_info *pi = netdev_priv(dev);
1674                 unsigned int i, first_qset = 0, other_qsets = 0;
1675
1676                 if (!capable(CAP_NET_ADMIN))
1677                         return -EPERM;
1678                 if (adapter->flags & FULL_INIT_DONE)
1679                         return -EBUSY;
1680                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1681                         return -EFAULT;
1682                 if (edata.val < 1 ||
1683                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1684                         return -EINVAL;
1685
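                /* Count the queue sets already claimed by the other ports. */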
1686                 for_each_port(adapter, i)
1687                         if (adapter->port[i] && adapter->port[i] != dev)
1688                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
1689
1690                 if (edata.val + other_qsets > SGE_QSETS)
1691                         return -EINVAL;
1692
1693                 pi->nqsets = edata.val;
1694
1695                 for_each_port(adapter, i)
1696                         if (adapter->port[i]) {
1697                                 pi = adap2pinfo(adapter, i);
1698                                 pi->first_qset = first_qset;
1699                                 first_qset += pi->nqsets;
1700                         }
1701                 break;
1702         }
1703         case CHELSIO_GET_QSET_NUM:{
1704                 struct ch_reg edata;
1705                 struct port_info *pi = netdev_priv(dev);
1706
1707                 edata.cmd = CHELSIO_GET_QSET_NUM;
1708                 edata.val = pi->nqsets;
1709                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1710                         return -EFAULT;
1711                 break;
1712         }
1713         case CHELSIO_LOAD_FW:{
1714                 u8 *fw_data;
1715                 struct ch_mem_range t;
1716
1717                 if (!capable(CAP_NET_ADMIN))
1718                         return -EPERM;
1719                 if (copy_from_user(&t, useraddr, sizeof(t)))
1720                         return -EFAULT;
1721
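                /*
                 * Note that t.len comes straight from user space and is not
                 * validated here; an oversized request simply makes the
                 * allocation below fail.
                 */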
1722                 fw_data = kmalloc(t.len, GFP_KERNEL);
1723                 if (!fw_data)
1724                         return -ENOMEM;
1725
                if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
1728                         kfree(fw_data);
1729                         return -EFAULT;
1730                 }
1731
1732                 ret = t3_load_fw(adapter, fw_data, t.len);
1733                 kfree(fw_data);
1734                 if (ret)
1735                         return ret;
1736                 break;
1737         }
1738         case CHELSIO_SETMTUTAB:{
1739                 struct ch_mtus m;
1740                 int i;
1741
1742                 if (!is_offload(adapter))
1743                         return -EOPNOTSUPP;
1744                 if (!capable(CAP_NET_ADMIN))
1745                         return -EPERM;
1746                 if (offload_running(adapter))
1747                         return -EBUSY;
1748                 if (copy_from_user(&m, useraddr, sizeof(m)))
1749                         return -EFAULT;
1750                 if (m.nmtus != NMTUS)
1751                         return -EINVAL;
1752                 if (m.mtus[0] < 81)     /* accommodate SACK */
1753                         return -EINVAL;
1754
1755                 /* MTUs must be in ascending order */
1756                 for (i = 1; i < NMTUS; ++i)
1757                         if (m.mtus[i] < m.mtus[i - 1])
1758                                 return -EINVAL;
1759
1760                 memcpy(adapter->params.mtus, m.mtus,
1761                         sizeof(adapter->params.mtus));
1762                 break;
1763         }
1764         case CHELSIO_GET_PM:{
1765                 struct tp_params *p = &adapter->params.tp;
1766                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1767
1768                 if (!is_offload(adapter))
1769                         return -EOPNOTSUPP;
1770                 m.tx_pg_sz = p->tx_pg_size;
1771                 m.tx_num_pg = p->tx_num_pgs;
1772                 m.rx_pg_sz = p->rx_pg_size;
1773                 m.rx_num_pg = p->rx_num_pgs;
1774                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1775                 if (copy_to_user(useraddr, &m, sizeof(m)))
1776                         return -EFAULT;
1777                 break;
1778         }
1779         case CHELSIO_SET_PM:{
1780                 struct ch_pm m;
1781                 struct tp_params *p = &adapter->params.tp;
1782
1783                 if (!is_offload(adapter))
1784                         return -EOPNOTSUPP;
1785                 if (!capable(CAP_NET_ADMIN))
1786                         return -EPERM;
1787                 if (adapter->flags & FULL_INIT_DONE)
1788                         return -EBUSY;
1789                 if (copy_from_user(&m, useraddr, sizeof(m)))
1790                         return -EFAULT;
1791                 if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
1792                         !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
1793                         return -EINVAL; /* not power of 2 */
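                /*
                 * The masks below encode the page sizes the hardware
                 * accepts: 0x14000 allows 16KB or 64KB Rx pages, 0x1554000
                 * allows Tx pages of 16KB to 16MB in powers of 4.
                 */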
1794                 if (!(m.rx_pg_sz & 0x14000))
1795                         return -EINVAL; /* not 16KB or 64KB */
1796                 if (!(m.tx_pg_sz & 0x1554000))
1797                         return -EINVAL;
1798                 if (m.tx_num_pg == -1)
1799                         m.tx_num_pg = p->tx_num_pgs;
1800                 if (m.rx_num_pg == -1)
1801                         m.rx_num_pg = p->rx_num_pgs;
1802                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1803                         return -EINVAL;
1804                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1805                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1806                         return -EINVAL;
1807                 p->rx_pg_size = m.rx_pg_sz;
1808                 p->tx_pg_size = m.tx_pg_sz;
1809                 p->rx_num_pgs = m.rx_num_pg;
1810                 p->tx_num_pgs = m.tx_num_pg;
1811                 break;
1812         }
1813         case CHELSIO_GET_MEM:{
1814                 struct ch_mem_range t;
1815                 struct mc7 *mem;
1816                 u64 buf[32];
1817
1818                 if (!is_offload(adapter))
1819                         return -EOPNOTSUPP;
1820                 if (!(adapter->flags & FULL_INIT_DONE))
1821                         return -EIO;    /* need the memory controllers */
1822                 if (copy_from_user(&t, useraddr, sizeof(t)))
1823                         return -EFAULT;
1824                 if ((t.addr & 7) || (t.len & 7))
1825                         return -EINVAL;
1826                 if (t.mem_id == MEM_CM)
1827                         mem = &adapter->cm;
1828                 else if (t.mem_id == MEM_PMRX)
1829                         mem = &adapter->pmrx;
1830                 else if (t.mem_id == MEM_PMTX)
1831                         mem = &adapter->pmtx;
1832                 else
1833                         return -EINVAL;
1834
                /*
                 * Version scheme:
                 * bits 0..9: chip version
                 * bits 10..15: chip revision
                 */
1840                 t.version = 3 | (adapter->params.rev << 10);
1841                 if (copy_to_user(useraddr, &t, sizeof(t)))
1842                         return -EFAULT;
1843
1844                 /*
1845                  * Read 256 bytes at a time as len can be large and we don't
1846                  * want to use huge intermediate buffers.
1847                  */
1848                 useraddr += sizeof(t);  /* advance to start of buffer */
1849                 while (t.len) {
                        unsigned int chunk = min_t(unsigned int, t.len,
                                                   sizeof(buf));

                        ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
1856                         if (ret)
1857                                 return ret;
1858                         if (copy_to_user(useraddr, buf, chunk))
1859                                 return -EFAULT;
1860                         useraddr += chunk;
1861                         t.addr += chunk;
1862                         t.len -= chunk;
1863                 }
1864                 break;
1865         }
1866         case CHELSIO_SET_TRACE_FILTER:{
1867                 struct ch_trace t;
1868                 const struct trace_params *tp;
1869
1870                 if (!capable(CAP_NET_ADMIN))
1871                         return -EPERM;
1872                 if (!offload_running(adapter))
1873                         return -EAGAIN;
1874                 if (copy_from_user(&t, useraddr, sizeof(t)))
1875                         return -EFAULT;
1876
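                /*
                 * This cast relies on struct trace_params matching the
                 * layout of struct ch_trace from the sip field onward.
                 */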
1877                 tp = (const struct trace_params *)&t.sip;
1878                 if (t.config_tx)
1879                         t3_config_trace_filter(adapter, tp, 0,
1880                                                 t.invert_match,
1881                                                 t.trace_tx);
1882                 if (t.config_rx)
1883                         t3_config_trace_filter(adapter, tp, 1,
1884                                                 t.invert_match,
1885                                                 t.trace_rx);
1886                 break;
1887         }
1888         case CHELSIO_SET_PKTSCHED:{
1889                 struct ch_pktsched_params p;
1890
                if (!capable(CAP_NET_ADMIN))
                        return -EPERM;
                if (!adapter->open_device_map)
                        return -EAGAIN; /* uP and SGE must be running */
                if (copy_from_user(&p, useraddr, sizeof(p)))
                        return -EFAULT;
                send_pktsched_cmd(adapter, p.sched, p.idx, p.min, p.max,
                                  p.binding);
                break;
        }
1902         default:
1903                 return -EOPNOTSUPP;
1904         }
1905         return 0;
1906 }
1907
1908 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1909 {
1910         int ret, mmd;
1911         struct adapter *adapter = dev->priv;
1912         struct port_info *pi = netdev_priv(dev);
1913         struct mii_ioctl_data *data = if_mii(req);
1914
1915         switch (cmd) {
1916         case SIOCGMIIPHY:
1917                 data->phy_id = pi->phy.addr;
1918                 /* FALLTHRU */
1919         case SIOCGMIIREG:{
1920                 u32 val;
1921                 struct cphy *phy = &pi->phy;
1922
1923                 if (!phy->mdio_read)
1924                         return -EOPNOTSUPP;
1925                 if (is_10G(adapter)) {
1926                         mmd = data->phy_id >> 8;
1927                         if (!mmd)
1928                                 mmd = MDIO_DEV_PCS;
1929                         else if (mmd > MDIO_DEV_XGXS)
1930                                 return -EINVAL;
1931
                        ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
                                             mmd, data->reg_num, &val);
                } else {
                        ret = phy->mdio_read(adapter, data->phy_id & 0x1f, 0,
                                             data->reg_num & 0x1f, &val);
                }
1940                 if (!ret)
1941                         data->val_out = val;
1942                 break;
1943         }
1944         case SIOCSMIIREG:{
1945                 struct cphy *phy = &pi->phy;
1946
1947                 if (!capable(CAP_NET_ADMIN))
1948                         return -EPERM;
1949                 if (!phy->mdio_write)
1950                         return -EOPNOTSUPP;
1951                 if (is_10G(adapter)) {
1952                         mmd = data->phy_id >> 8;
1953                         if (!mmd)
1954                                 mmd = MDIO_DEV_PCS;
1955                         else if (mmd > MDIO_DEV_XGXS)
1956                                 return -EINVAL;
1957
                        ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
                                              mmd, data->reg_num,
                                              data->val_in);
                } else {
                        ret = phy->mdio_write(adapter, data->phy_id & 0x1f, 0,
                                              data->reg_num & 0x1f,
                                              data->val_in);
                }
1969                 break;
1970         }
1971         case SIOCCHIOCTL:
1972                 return cxgb_extension_ioctl(dev, req->ifr_data);
1973         default:
1974                 return -EOPNOTSUPP;
1975         }
1976         return ret;
1977 }
1978
1979 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1980 {
1981         int ret;
1982         struct adapter *adapter = dev->priv;
1983         struct port_info *pi = netdev_priv(dev);
1984
1985         if (new_mtu < 81)       /* accommodate SACK */
1986                 return -EINVAL;
        ret = t3_mac_set_mtu(&pi->mac, new_mtu);
        if (ret)
                return ret;
1989         dev->mtu = new_mtu;
1990         init_port_mtus(adapter);
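        /*
         * Rev 0 (T3A) parts need the HW MTU table reloaded with entries
         * capped at the port MTU whenever offload is running.
         */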
1991         if (adapter->params.rev == 0 && offload_running(adapter))
1992                 t3_load_mtus(adapter, adapter->params.mtus,
1993                              adapter->params.a_wnd, adapter->params.b_wnd,
1994                              adapter->port[0]->mtu);
1995         return 0;
1996 }
1997
1998 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
1999 {
2000         struct adapter *adapter = dev->priv;
2001         struct port_info *pi = netdev_priv(dev);
2002         struct sockaddr *addr = p;
2003
2004         if (!is_valid_ether_addr(addr->sa_data))
2005                 return -EINVAL;
2006
2007         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2008         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
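        /* Keep the offload SMT (source MAC table) in sync. */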
2009         if (offload_running(adapter))
2010                 write_smt_entry(adapter, pi->port_id);
2011         return 0;
2012 }
2013
2014 /**
2015  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2016  * @adap: the adapter
2017  * @p: the port
2018  *
2019  * Ensures that current Rx processing on any of the queues associated with
2020  * the given port completes before returning.  We do this by acquiring and
2021  * releasing the locks of the response queues associated with the port.
2022  */
2023 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2024 {
2025         int i;
2026
2027         for (i = 0; i < p->nqsets; i++) {
2028                 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2029
2030                 spin_lock_irq(&q->lock);
2031                 spin_unlock_irq(&q->lock);
2032         }
2033 }
2034
2035 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2036 {
2037         struct adapter *adapter = dev->priv;
2038         struct port_info *pi = netdev_priv(dev);
2039
2040         pi->vlan_grp = grp;
        if (adapter->params.rev > 0) {
                t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
        } else {
                /* single control for all ports */
                unsigned int i, have_vlans = 0;

                for_each_port(adapter, i)
                        have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

                t3_set_vlan_accel(adapter, 1, have_vlans);
2050         }
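        /*
         * Make sure any Rx processing that may still be using the old
         * vlan_grp has finished before we return.
         */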
2051         t3_synchronize_rx(adapter, pi);
2052 }
2053
2054 static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2055 {
2056         /* nothing */
2057 }
2058
2059 #ifdef CONFIG_NET_POLL_CONTROLLER
2060 static void cxgb_netpoll(struct net_device *dev)
2061 {
2062         struct adapter *adapter = dev->priv;
2063         struct sge_qset *qs = dev2qset(dev);
2064
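        /*
         * t3_intr_handler() returns the handler that matches the current
         * interrupt/polling mode; invoke it directly.
         */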
        t3_intr_handler(adapter, qs->rspq.polling)(adapter->pdev->irq,
                                                   adapter);
2067 }
2068 #endif
2069
2070 /*
2071  * Periodic accumulation of MAC statistics.
2072  */
2073 static void mac_stats_update(struct adapter *adapter)
2074 {
2075         int i;
2076
2077         for_each_port(adapter, i) {
2078                 struct net_device *dev = adapter->port[i];
2079                 struct port_info *p = netdev_priv(dev);
2080
2081                 if (netif_running(dev)) {
2082                         spin_lock(&adapter->stats_lock);
2083                         t3_mac_update_stats(&p->mac);
2084                         spin_unlock(&adapter->stats_lock);
2085                 }
2086         }
2087 }
2088
2089 static void check_link_status(struct adapter *adapter)
2090 {
2091         int i;
2092
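        /* Only poll ports whose PHY cannot interrupt on link changes. */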
2093         for_each_port(adapter, i) {
2094                 struct net_device *dev = adapter->port[i];
2095                 struct port_info *p = netdev_priv(dev);
2096
2097                 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2098                         t3_link_changed(adapter, i);
2099         }
2100 }
2101
2102 static void t3_adap_check_task(struct work_struct *work)
2103 {
2104         struct adapter *adapter = container_of(work, struct adapter,
2105                                                adap_check_task.work);
2106         const struct adapter_params *p = &adapter->params;
2107
2108         adapter->check_task_cnt++;
2109
2110         /* Check link status for PHYs without interrupts */
2111         if (p->linkpoll_period)
2112                 check_link_status(adapter);
2113
2114         /* Accumulate MAC stats if needed */
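        /*
         * linkpoll_period is in units of 0.1s and stats_update_period in
         * seconds, hence the scaling by 10.
         */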
2115         if (!p->linkpoll_period ||
2116             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2117             p->stats_update_period) {
2118                 mac_stats_update(adapter);
2119                 adapter->check_task_cnt = 0;
2120         }
2121
2122         /* Schedule the next check update if any port is active. */
2123         spin_lock(&adapter->work_lock);
2124         if (adapter->open_device_map & PORT_MASK)
2125                 schedule_chk_task(adapter);
2126         spin_unlock(&adapter->work_lock);
2127 }
2128
2129 /*
2130  * Processes external (PHY) interrupts in process context.
2131  */
2132 static void ext_intr_task(struct work_struct *work)
2133 {
2134         struct adapter *adapter = container_of(work, struct adapter,
2135                                                ext_intr_handler_task);
2136
2137         t3_phy_intr_handler(adapter);
2138
2139         /* Now reenable external interrupts */
2140         spin_lock_irq(&adapter->work_lock);
2141         if (adapter->slow_intr_mask) {
2142                 adapter->slow_intr_mask |= F_T3DBG;
2143                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2144                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2145                              adapter->slow_intr_mask);
2146         }
2147         spin_unlock_irq(&adapter->work_lock);
2148 }
2149
2150 /*
2151  * Interrupt-context handler for external (PHY) interrupts.
2152  */
2153 void t3_os_ext_intr_handler(struct adapter *adapter)
2154 {
2155         /*
2156          * Schedule a task to handle external interrupts as they may be slow
2157          * and we use a mutex to protect MDIO registers.  We disable PHY
2158          * interrupts in the meantime and let the task reenable them when
2159          * it's done.
2160          */
2161         spin_lock(&adapter->work_lock);
2162         if (adapter->slow_intr_mask) {
2163                 adapter->slow_intr_mask &= ~F_T3DBG;
2164                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2165                              adapter->slow_intr_mask);
2166                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2167         }
2168         spin_unlock(&adapter->work_lock);
2169 }
2170
2171 void t3_fatal_err(struct adapter *adapter)
2172 {
2173         unsigned int fw_status[4];
2174
2175         if (adapter->flags & FULL_INIT_DONE) {
2176                 t3_sge_stop(adapter);
2177                 t3_intr_disable(adapter);
2178         }
2179         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
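        /* The FW posts its status words at CIM control-block offset 0xa0. */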
2180         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2181                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2182                          fw_status[0], fw_status[1],
2183                          fw_status[2], fw_status[3]);
2184
2185 }
2186
2187 static int __devinit cxgb_enable_msix(struct adapter *adap)
2188 {
2189         struct msix_entry entries[SGE_QSETS + 1];
2190         int i, err;
2191
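        /* One vector per queue set plus one for slow-path/async events. */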
2192         for (i = 0; i < ARRAY_SIZE(entries); ++i)
2193                 entries[i].entry = i;
2194
2195         err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2196         if (!err) {
2197                 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2198                         adap->msix_info[i].vec = entries[i].vector;
2199         } else if (err > 0)
2200                 dev_info(&adap->pdev->dev,
2201                        "only %d MSI-X vectors left, not using MSI-X\n", err);
2202         return err;
2203 }
2204
2205 static void __devinit print_port_info(struct adapter *adap,
2206                                       const struct adapter_info *ai)
2207 {
2208         static const char *pci_variant[] = {
2209                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2210         };
2211
2212         int i;
2213         char buf[80];
2214
2215         if (is_pcie(adap))
2216                 snprintf(buf, sizeof(buf), "%s x%d",
2217                          pci_variant[adap->params.pci.variant],
2218                          adap->params.pci.width);
2219         else
2220                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2221                          pci_variant[adap->params.pci.variant],
2222                          adap->params.pci.speed, adap->params.pci.width);
2223
2224         for_each_port(adap, i) {
2225                 struct net_device *dev = adap->port[i];
2226                 const struct port_info *pi = netdev_priv(dev);
2227
2228                 if (!test_bit(i, &adap->registered_device_map))
2229                         continue;
2230                 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2231                        dev->name, ai->desc, pi->port_type->desc,
2232                        adap->params.rev, buf,
2233                        (adap->flags & USING_MSIX) ? " MSI-X" :
2234                        (adap->flags & USING_MSI) ? " MSI" : "");
2235                 if (adap->name == dev->name && adap->params.vpd.mclk)
2236                         printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2237                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2238                                t3_mc7_size(&adap->pmtx) >> 20,
2239                                t3_mc7_size(&adap->pmrx) >> 20);
2240         }
2241 }
2242
2243 static int __devinit init_one(struct pci_dev *pdev,
2244                               const struct pci_device_id *ent)
2245 {
2246         static int version_printed;
2247
2248         int i, err, pci_using_dac = 0;
2249         unsigned long mmio_start, mmio_len;
2250         const struct adapter_info *ai;
2251         struct adapter *adapter = NULL;
2252         struct port_info *pi;
2253
2254         if (!version_printed) {
2255                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2256                 ++version_printed;
2257         }
2258
2259         if (!cxgb3_wq) {
2260                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2261                 if (!cxgb3_wq) {
2262                         printk(KERN_ERR DRV_NAME
2263                                ": cannot initialize work queue\n");
2264                         return -ENOMEM;
2265                 }
2266         }
2267
2268         err = pci_request_regions(pdev, DRV_NAME);
2269         if (err) {
2270                 /* Just info, some other driver may have claimed the device. */
2271                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2272                 return err;
2273         }
2274
2275         err = pci_enable_device(pdev);
2276         if (err) {
2277                 dev_err(&pdev->dev, "cannot enable PCI device\n");
2278                 goto out_release_regions;
2279         }
2280
2281         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2282                 pci_using_dac = 1;
2283                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2284                 if (err) {
2285                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2286                                "coherent allocations\n");
2287                         goto out_disable_device;
2288                 }
2289         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2290                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2291                 goto out_disable_device;
2292         }
2293
2294         pci_set_master(pdev);
2295
2296         mmio_start = pci_resource_start(pdev, 0);
2297         mmio_len = pci_resource_len(pdev, 0);
2298         ai = t3_get_adapter_info(ent->driver_data);
2299
2300         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2301         if (!adapter) {
2302                 err = -ENOMEM;
2303                 goto out_disable_device;
2304         }
2305
2306         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2307         if (!adapter->regs) {
2308                 dev_err(&pdev->dev, "cannot map device registers\n");
2309                 err = -ENOMEM;
2310                 goto out_free_adapter;
2311         }
2312
2313         adapter->pdev = pdev;
2314         adapter->name = pci_name(pdev);
2315         adapter->msg_enable = dflt_msg_enable;
2316         adapter->mmio_len = mmio_len;
2317
2318         mutex_init(&adapter->mdio_lock);
2319         spin_lock_init(&adapter->work_lock);
2320         spin_lock_init(&adapter->stats_lock);
2321
2322         INIT_LIST_HEAD(&adapter->adapter_list);
2323         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2324         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2325
2326         for (i = 0; i < ai->nports; ++i) {
2327                 struct net_device *netdev;
2328
2329                 netdev = alloc_etherdev(sizeof(struct port_info));
2330                 if (!netdev) {
2331                         err = -ENOMEM;
2332                         goto out_free_dev;
2333                 }
2334
2335                 SET_MODULE_OWNER(netdev);
2336                 SET_NETDEV_DEV(netdev, &pdev->dev);
2337
2338                 adapter->port[i] = netdev;
2339                 pi = netdev_priv(netdev);
2340                 pi->rx_csum_offload = 1;
2341                 pi->nqsets = 1;
2342                 pi->first_qset = i;
2343                 pi->activity = 0;
2344                 pi->port_id = i;
2345                 netif_carrier_off(netdev);
2346                 netdev->irq = pdev->irq;
2347                 netdev->mem_start = mmio_start;
2348                 netdev->mem_end = mmio_start + mmio_len - 1;
2349                 netdev->priv = adapter;
2350                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2351                 netdev->features |= NETIF_F_LLTX;
2352                 if (pci_using_dac)
2353                         netdev->features |= NETIF_F_HIGHDMA;
2354
2355                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2356                 netdev->vlan_rx_register = vlan_rx_register;
2357                 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2358
2359                 netdev->open = cxgb_open;
2360                 netdev->stop = cxgb_close;
2361                 netdev->hard_start_xmit = t3_eth_xmit;
2362                 netdev->get_stats = cxgb_get_stats;
2363                 netdev->set_multicast_list = cxgb_set_rxmode;
2364                 netdev->do_ioctl = cxgb_ioctl;
2365                 netdev->change_mtu = cxgb_change_mtu;
2366                 netdev->set_mac_address = cxgb_set_mac_addr;
2367 #ifdef CONFIG_NET_POLL_CONTROLLER
2368                 netdev->poll_controller = cxgb_netpoll;
2369 #endif
2370                 netdev->weight = 64;
2371
2372                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2373         }
2374
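        /*
         * drvdata points at the first port's net_device; remove_one()
         * recovers the adapter through it.
         */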
2375         pci_set_drvdata(pdev, adapter->port[0]);
2376         if (t3_prep_adapter(adapter, ai, 1) < 0) {
2377                 err = -ENODEV;
2378                 goto out_free_dev;
2379         }
2380
2381         /*
2382          * The card is now ready to go.  If any errors occur during device
2383          * registration we do not fail the whole card but rather proceed only
2384          * with the ports we manage to register successfully.  However we must
2385          * register at least one net device.
2386          */
2387         for_each_port(adapter, i) {
2388                 err = register_netdev(adapter->port[i]);
2389                 if (err)
2390                         dev_warn(&pdev->dev,
2391                                  "cannot register net device %s, skipping\n",
2392                                  adapter->port[i]->name);
2393                 else {
2394                         /*
2395                          * Change the name we use for messages to the name of
2396                          * the first successfully registered interface.
2397                          */
2398                         if (!adapter->registered_device_map)
2399                                 adapter->name = adapter->port[i]->name;
2400
2401                         __set_bit(i, &adapter->registered_device_map);
2402                 }
2403         }
2404         if (!adapter->registered_device_map) {
2405                 dev_err(&pdev->dev, "could not register any net devices\n");
2406                 goto out_free_dev;
2407         }
2408
2409         /* Driver's ready. Reflect it on LEDs */
2410         t3_led_ready(adapter);
2411
2412         if (is_offload(adapter)) {
2413                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2414                 cxgb3_adapter_ofld(adapter);
2415         }
2416
2417         /* See what interrupts we'll be using */
2418         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2419                 adapter->flags |= USING_MSIX;
2420         else if (msi > 0 && pci_enable_msi(pdev) == 0)
2421                 adapter->flags |= USING_MSI;
2422
        err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
                                 &cxgb3_attr_group);
        if (err)
                dev_warn(&pdev->dev,
                         "cannot create sysfs cxgb3 attribute group\n");
2425
2426         print_port_info(adapter, ai);
2427         return 0;
2428
2429 out_free_dev:
2430         iounmap(adapter->regs);
2431         for (i = ai->nports - 1; i >= 0; --i)
2432                 if (adapter->port[i])
2433                         free_netdev(adapter->port[i]);
2434
2435 out_free_adapter:
2436         kfree(adapter);
2437
2438 out_disable_device:
2439         pci_disable_device(pdev);
2440 out_release_regions:
2441         pci_release_regions(pdev);
2442         pci_set_drvdata(pdev, NULL);
2443         return err;
2444 }
2445
2446 static void __devexit remove_one(struct pci_dev *pdev)
2447 {
2448         struct net_device *dev = pci_get_drvdata(pdev);
2449
2450         if (dev) {
2451                 int i;
2452                 struct adapter *adapter = dev->priv;
2453
2454                 t3_sge_stop(adapter);
2455                 sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
2456                                    &cxgb3_attr_group);
2457
                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);
2461
2462                 if (is_offload(adapter)) {
2463                         cxgb3_adapter_unofld(adapter);
2464                         if (test_bit(OFFLOAD_DEVMAP_BIT,
2465                                      &adapter->open_device_map))
2466                                 offload_close(&adapter->tdev);
2467                 }
2468
2469                 t3_free_sge_resources(adapter);
2470                 cxgb_disable_msi(adapter);
2471
2472                 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2473                         if (adapter->dummy_netdev[i]) {
2474                                 free_netdev(adapter->dummy_netdev[i]);
2475                                 adapter->dummy_netdev[i] = NULL;
2476                         }
2477
2478                 for_each_port(adapter, i)
2479                         if (adapter->port[i])
2480                                 free_netdev(adapter->port[i]);
2481
2482                 iounmap(adapter->regs);
2483                 kfree(adapter);
2484                 pci_release_regions(pdev);
2485                 pci_disable_device(pdev);
2486                 pci_set_drvdata(pdev, NULL);
2487         }
2488 }
2489
2490 static struct pci_driver driver = {
2491         .name = DRV_NAME,
2492         .id_table = cxgb3_pci_tbl,
2493         .probe = init_one,
2494         .remove = __devexit_p(remove_one),
2495 };
2496
2497 static int __init cxgb3_init_module(void)
2498 {
2499         int ret;
2500
2501         cxgb3_offload_init();
2502
2503         ret = pci_register_driver(&driver);
2504         return ret;
2505 }
2506
2507 static void __exit cxgb3_cleanup_module(void)
2508 {
2509         pci_unregister_driver(&driver);
2510         if (cxgb3_wq)
2511                 destroy_workqueue(cxgb3_wq);
2512 }
2513
2514 module_init(cxgb3_init_module);
2515 module_exit(cxgb3_cleanup_module);