Add support for the latest 1G/10G Chelsio adapter, T3.
drivers/net/cxgb3/cxgb3_main.c
/*
 * This file is part of the Chelsio T3 Ethernet driver for Linux.
 *
 * Copyright (C) 2003-2006 Chelsio Communications.  All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define to_net_dev(class) container_of(class, struct net_device, class_dev)

#define CH_DEVICE(devid, ssid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 1, 0),  /* PE9000 */
        CH_DEVICE(0x21, 1, 1),  /* T302E */
        CH_DEVICE(0x22, 1, 2),  /* T310E */
        CH_DEVICE(0x23, 1, 3),  /* T320X */
        CH_DEVICE(0x24, 1, 1),  /* T302X */
        CH_DEVICE(0x25, 1, 3),  /* T320E */
        CH_DEVICE(0x26, 1, 2),  /* T310X */
        CH_DEVICE(0x30, 1, 2),  /* T3B10 */
        CH_DEVICE(0x31, 1, 3),  /* T3B20 */
        CH_DEVICE(0x32, 1, 1),  /* T3B02 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "interrupt scheme: 0 pin only, 1 allow MSI, 2 allow MSI-X");

/*
 * The driver enables offload by default.  To disable it, set
 * ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "disable offload at init time (default 0, offload enabled)");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the net device whose port settings are to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);
                link_report(dev);
        }
}

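/*
 * Propagate a device's RX filter settings (unicast address, multicast
 * list, promiscuity) from the net_device state to the port's MAC.
 */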
static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

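/*
 * Release whichever of MSI-X or MSI the adapter is using, falling back
 * to legacy pin interrupts.
 */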
static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s (queue %d)", d->name, i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}

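/*
 * Request one MSI-X data interrupt per configured SGE queue set, using
 * the descriptions prepared by name_msix_vecs().  Vector 0 is reserved
 * for async events, so data vectors start at index 1.  On failure, free
 * the vectors acquired so far.
 */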
static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6), cpus, rspq_map);
}

/*
 * If we have multiple receive queues per port serviced by NAPI we need one
 * netdevice per queue as NAPI operates on netdevices.  We already have one
 * netdevice, namely the one associated with the interface, so we use dummy
 * ones for any additional queues.  Note that these netdevices exist purely
 * so that NAPI has something to work with, they do not represent network
 * ports and are not registered.
 */
static int init_dummy_netdevs(struct adapter *adap)
{
        int i, j, dummy_idx = 0;
        struct net_device *nd;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets - 1; j++) {
                        if (!adap->dummy_netdev[dummy_idx]) {
                                nd = alloc_netdev(0, "", ether_setup);
                                if (!nd)
                                        goto free_all;

                                nd->priv = adap;
                                nd->weight = 64;
                                set_bit(__LINK_STATE_START, &nd->state);
                                adap->dummy_netdev[dummy_idx] = nd;
                        }
                        strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
                        dummy_idx++;
                }
        }
        return 0;

free_all:
        while (--dummy_idx >= 0) {
                free_netdev(adap->dummy_netdev[dummy_idx]);
                adap->dummy_netdev[dummy_idx] = NULL;
        }
        return -ENOMEM;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;
        struct net_device *dev;

        for_each_port(adap, i) {
                dev = adap->port[i];
                while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                        msleep(1);
        }

        for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
                dev = adap->dummy_netdev[i];
                if (dev)
                        while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
                                msleep(1);
        }
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
        unsigned int ntxq = is_offload(adap) ? SGE_TXQ_PER_SET : 1;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                const struct port_info *pi = netdev_priv(dev);

                for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq,
                                j == 0 ? dev :
                                         adap->dummy_netdev[dummy_dev_idx++]);
                        if (err) {
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

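/*
 * sysfs show/store helpers.  Each attribute supplies a small format or
 * set callback; these wrappers add the locking and permission checks
 * common to all of them.
 */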
static ssize_t attr_show(struct class_device *cd, char *buf,
                         ssize_t(*format) (struct adapter *, char *))
{
        ssize_t len;
        struct adapter *adap = to_net_dev(cd)->priv;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (adap, buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct class_device *cd, const char *buf, size_t len,
                          ssize_t(*set) (struct adapter *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;
        struct adapter *adap = to_net_dev(cd)->priv;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (adap, val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct adapter *adap, char *buf) \
{ \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct class_device *cd, char *buf) \
{ \
        return attr_show(cd, buf, format_##name); \
}

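/*
 * The MC5 TCAM is shared between filter and server entries, so the two
 * sizes constrain each other, and either may be changed only before the
 * HW has been fully initialized.
 */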
static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
{
        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct class_device *cd, const char *buf,
                              size_t len)
{
        return attr_store(cd, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct adapter *adap, unsigned int val)
{
        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct class_device *cd, const char *buf,
                              size_t len)
{
        return attr_store(cd, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &class_device_attr_cam_size.attr,
        &class_device_attr_nfilters.attr,
        &class_device_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

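/*
 * Show/store for the Tx traffic-manager scheduler attributes.  A
 * scheduler's bytes-per-tick and clocks-per-tick values are read
 * through the TP TM PIO window and converted to a rate in Kbps;
 * store reconfigures the scheduler via t3_config_sched().
 */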
static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched)
{
        ssize_t len;
        unsigned int v, addr, bpt, cpt;
        struct adapter *adap = to_net_dev(cd)->priv;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}

static ssize_t tm_attr_store(struct class_device *cd, const char *buf,
                             size_t len, int sched)
{
        char *endp;
        ssize_t ret;
        unsigned int val;
        struct adapter *adap = to_net_dev(cd)->priv;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct class_device *cd, char *buf) \
{ \
        return tm_attr_show(cd, buf, sched); \
} \
static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \
{ \
        return tm_attr_store(cd, buf, len, sched); \
} \
static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &class_device_attr_sched0.attr,
        &class_device_attr_sched1.attr,
        &class_device_attr_sched2.attr,
        &class_device_attr_sched3.attr,
        &class_device_attr_sched4.attr,
        &class_device_attr_sched5.attr,
        &class_device_attr_sched6.attr,
        &class_device_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

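/*
 * Program one entry of the HW source MAC table (SMT) with the MAC
 * address of the corresponding port, using a CPL_SMT_WRITE_REQ message
 * sent through the offload queue.
 */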
static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

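/* Load the per-port MTUs, packed as two 16-bit fields, into the TP MTU port table. */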
static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err = 0;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap);
                if (err) {
                        dev_err(&adap->pdev->dev,
                                "adapter FW is not compatible with driver\n");
                        goto out;
                }

                err = init_dummy_netdevs(adap);
                if (err)
                        goto out;

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                if (request_msix_data_irqs(adap)) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ? 0 : SA_SHIRQ,
                                      adap->name, adap)))
                goto irq_err;

        t3_sge_start(adap);
        t3_intr_enable(adap);
out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                    n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);

        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
        quiesce_rx(adapter);
}

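/*
 * Schedule the periodic adapter check task on the driver's own work
 * queue, using the link-poll period if link polling is configured and
 * the stats update period otherwise.
 */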
static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}

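/*
 * Bring up the offload capabilities of the adapter: enable offload mode
 * in the HW, set up the MTU and SMT tables, and notify registered ULD
 * clients.  Runs at most once per adapter, guarded by OFFLOAD_DEVMAP_BIT.
 */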
static int offload_open(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct t3cdev *tdev = T3CDEV(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err = 0;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                return err;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        /* Never mind if the next step fails */
        sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* back out offload mode and the devmap bit if activation failed */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

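/*
 * net_device open handler: bring the adapter up on the first open,
 * optionally initialize offload, then enable the port.
 */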
static int cxgb_open(struct net_device *dev)
{
        int err;
        struct adapter *adapter = dev->priv;
        struct port_info *pi = netdev_priv(dev);
        int other_ports = adapter->open_device_map & PORT_MASK;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (!ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_start_queue(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = netdev_priv(dev);

        t3_port_intr_disable(adapter, p->port_id);
        netif_stop_queue(dev);
        p->phy.ops->power_down(&p->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock(&adapter->work_lock); /* sync with update task */
        clear_bit(p->port_id, &adapter->open_device_map);
        spin_unlock(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_rearming_delayed_workqueue(cxgb3_wq,
                                                  &adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

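/*
 * net_device get_stats handler: fold the MAC statistics maintained by
 * the common code into the standard net_device_stats format.
 */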
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;
        struct port_info *p = netdev_priv(dev);
        struct net_device_stats *ns = &p->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&p->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct adapter *adapter = dev->priv;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct adapter *adapter = dev->priv;

        adapter->msg_enable = val;
}

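/* Names of the ethtool statistics, in the order produced by get_stats(). */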
static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "RxDrops            "
};

static int get_stats_count(struct net_device *dev)
{
        return ARRAY_SIZE(stats_strings);
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        u32 fw_vers = 0;
        struct adapter *adapter = dev->priv;

        t3_get_fw_version(adapter, &fw_vers);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u", (fw_vers >> 24) ? "T" : "N",
                         (fw_vers >> 12) & 0xfff, fw_vers & 0xfff);
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

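/*
 * Sum one per-port SGE statistic (selected by idx) across all the queue
 * sets owned by a port.
 */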
static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = 0; i < p->nqsets; ++i)
                tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct adapter *adapter = dev->priv;
        struct port_info *pi = netdev_priv(dev);
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = s->rx_cong_drops;
}

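/*
 * Copy a contiguous block of registers [start, end] into the register
 * dump buffer at the offset corresponding to their addresses.
 */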
static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct adapter *ap = dev->priv;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

        /*
         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation.  Hard to justify the complexity.
         */
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

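/* ethtool nway_reset: restart autonegotiation if it is enabled and the interface is up. */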
static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_config.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        p->phy.ops->autoneg_restart(&p->phy);
        return 0;
}

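/*
 * ethtool phys_id: identify the port by blinking its LED, driven from
 * GPIO0, for the requested number of seconds (default 2).
 */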
static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
        int i;
        struct adapter *adapter = dev->priv;

        if (data == 0)
                data = 2;

        for (i = 0; i < data * 2; i++) {
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                                 (i & 1) ? F_GPIO0_OUT_VAL : 0);
                if (msleep_interruptible(500))
                        break;
        }
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         F_GPIO0_OUT_VAL);
        return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);

        cmd->supported = p->link_config.supported;
        cmd->advertising = p->link_config.advertising;

        if (netif_carrier_ok(dev)) {
                cmd->speed = p->link_config.speed;
                cmd->duplex = p->link_config.duplex;
        } else {
                cmd->speed = -1;
                cmd->duplex = -1;
        }

        cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
        cmd->phy_address = p->phy.addr;
        cmd->transceiver = XCVR_EXTERNAL;
        cmd->autoneg = p->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}

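/* Map a (speed, duplex) pair to the corresponding ethtool SUPPORTED_* capability bit. */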
static int speed_duplex_to_caps(int speed, int duplex)
{
        int cap = 0;

        switch (speed) {
        case SPEED_10:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10baseT_Full;
                else
                        cap = SUPPORTED_10baseT_Half;
                break;
        case SPEED_100:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_100baseT_Full;
                else
                        cap = SUPPORTED_100baseT_Half;
                break;
        case SPEED_1000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_1000baseT_Full;
                else
                        cap = SUPPORTED_1000baseT_Half;
                break;
        case SPEED_10000:
                if (duplex == DUPLEX_FULL)
                        cap = SUPPORTED_10000baseT_Full;
        }
        return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
                      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
                      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
                      ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (!(lc->supported & SUPPORTED_Autoneg))
                return -EOPNOTSUPP;     /* can't change speed/duplex */

        if (cmd->autoneg == AUTONEG_DISABLE) {
                int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

                if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
                        return -EINVAL;
                lc->requested_speed = cmd->speed;
                lc->requested_duplex = cmd->duplex;
                lc->advertising = 0;
        } else {
                cmd->advertising &= ADVERTISED_MASK;
                cmd->advertising &= lc->supported;
                if (!cmd->advertising)
                        return -EINVAL;
                lc->requested_speed = SPEED_INVALID;
                lc->requested_duplex = DUPLEX_INVALID;
                lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
        }
        lc->autoneg = cmd->autoneg;
        if (netif_running(dev))
                t3_link_start(&p->phy, &p->mac, lc);
        return 0;
}

static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_config;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & SUPPORTED_Autoneg)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (lc->autoneg == AUTONEG_ENABLE) {
                if (netif_running(dev))
                        t3_link_start(&p->phy, &p->mac, lc);
        } else {
                lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
                if (netif_running(dev))
                        t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
        }
        return 0;
}

static u32 get_rx_csum(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
        struct port_info *p = netdev_priv(dev);

        p->rx_csum_offload = data;
        return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        struct adapter *adapter = dev->priv;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = 0;
        e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = adapter->params.sge.qset[0].fl_size;
        e->rx_mini_pending = adapter->params.sge.qset[0].rspq_size;
        e->rx_jumbo_pending = adapter->params.sge.qset[0].jumbo_size;
        e->tx_pending = adapter->params.sge.qset[0].txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        int i;
        struct adapter *adapter = dev->priv;

        if (e->rx_pending > MAX_RX_BUFFERS ||
            e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES ||
            e->rx_jumbo_pending < MIN_FL_ENTRIES ||
            e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        for (i = 0; i < SGE_QSETS; ++i) {
                struct qset_params *q = &adapter->params.sge.qset[i];

                q->rspq_size = e->rx_mini_pending;
                q->fl_size = e->rx_pending;
                q->jumbo_size = e->rx_jumbo_pending;
                q->txq_size[0] = e->tx_pending;
                q->txq_size[1] = e->tx_pending;
                q->txq_size[2] = e->tx_pending;
        }
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;
        struct qset_params *qsp = &adapter->params.sge.qset[0];
        struct sge_qset *qs = &adapter->sge.qs[0];

        if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
                return -EINVAL;

        qsp->coalesce_usecs = c->rx_coalesce_usecs;
        t3_update_qset_coalesce(qs, qsp);
        return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct adapter *adapter = dev->priv;
        struct qset_params *q = adapter->params.sge.qset;

        c->rx_coalesce_usecs = q->coalesce_usecs;
        return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
                      u8 * data)
{
        int i, err = 0;
        struct adapter *adapter = dev->priv;

        u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        e->magic = EEPROM_MAGIC;
        for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
                err = t3_seeprom_read(adapter, i, (u32 *) & buf[i]);

        if (!err)
                memcpy(data, buf + e->offset, e->len);
        kfree(buf);
        return err;
}

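/*
 * EEPROM writes must be 32-bit aligned, so partial first/last words are
 * handled with a read-modify-write through a bounce buffer.
 */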
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
                      u8 * data)
{
        u8 *buf;
        int err = 0;
        u32 aligned_offset, aligned_len, *p;
        struct adapter *adapter = dev->priv;

        if (eeprom->magic != EEPROM_MAGIC)
                return -EINVAL;

        aligned_offset = eeprom->offset & ~3;
        aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

        if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
                buf = kmalloc(aligned_len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
                if (!err && aligned_len > 4)
                        err = t3_seeprom_read(adapter,
                                              aligned_offset + aligned_len - 4,
                                              (u32 *) & buf[aligned_len - 4]);
                if (err)
                        goto out;
                memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
        } else
                buf = data;

        err = t3_seeprom_wp(adapter, 0);
        if (err)
                goto out;

        for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
                err = t3_seeprom_write(adapter, aligned_offset, *p);
                aligned_offset += 4;
        }

        if (!err)
                err = t3_seeprom_wp(adapter, 1);
out:
        if (buf != data)
                kfree(buf);
        return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        wol->supported = 0;
        wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
        .get_settings = get_settings,
        .set_settings = set_settings,
        .get_drvinfo = get_drvinfo,
        .get_msglevel = get_msglevel,
        .set_msglevel = set_msglevel,
        .get_ringparam = get_sge_param,
        .set_ringparam = set_sge_param,
        .get_coalesce = get_coalesce,
        .set_coalesce = set_coalesce,
        .get_eeprom_len = get_eeprom_len,
        .get_eeprom = get_eeprom,
        .set_eeprom = set_eeprom,
        .get_pauseparam = get_pauseparam,
        .set_pauseparam = set_pauseparam,
        .get_rx_csum = get_rx_csum,
        .set_rx_csum = set_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = ethtool_op_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
        .get_link = ethtool_op_get_link,
        .get_strings = get_strings,
        .phys_id = cxgb3_phys_id,
        .nway_reset = restart_autoneg,
        .get_stats_count = get_stats_count,
        .get_ethtool_stats = get_stats,
        .get_regs_len = get_regs_len,
        .get_regs = get_regs,
        .get_wol = get_wol,
        .get_tso = ethtool_op_get_tso,
        .set_tso = ethtool_op_set_tso,
        .get_perm_addr = ethtool_op_get_perm_addr
};

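/*
 * Range check for ioctl parameters.  Negative values mean "leave this
 * field unchanged" and are accepted by design.
 */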
static int in_range(int val, int lo, int hi)
{
        return val < 0 || (val <= hi && val >= lo);
}

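/* Dispatcher for the driver's private ioctl sub-commands; the first u32 of the user buffer selects the command. */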
1480 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1481 {
1482         int ret;
1483         u32 cmd;
1484         struct adapter *adapter = dev->priv;
1485
1486         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1487                 return -EFAULT;
1488
1489         switch (cmd) {
1490         case CHELSIO_SETREG:{
1491                 struct ch_reg edata;
1492
1493                 if (!capable(CAP_NET_ADMIN))
1494                         return -EPERM;
1495                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1496                         return -EFAULT;
1497                 if ((edata.addr & 3) != 0
1498                         || edata.addr >= adapter->mmio_len)
1499                         return -EINVAL;
1500                 writel(edata.val, adapter->regs + edata.addr);
1501                 break;
1502         }
1503         case CHELSIO_GETREG:{
1504                 struct ch_reg edata;
1505
1506                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1507                         return -EFAULT;
1508                 if ((edata.addr & 3) != 0
1509                         || edata.addr >= adapter->mmio_len)
1510                         return -EINVAL;
1511                 edata.val = readl(adapter->regs + edata.addr);
1512                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1513                         return -EFAULT;
1514                 break;
1515         }
1516         case CHELSIO_SET_QSET_PARAMS:{
1517                 int i;
1518                 struct qset_params *q;
1519                 struct ch_qset_params t;
1520
1521                 if (!capable(CAP_NET_ADMIN))
1522                         return -EPERM;
1523                 if (copy_from_user(&t, useraddr, sizeof(t)))
1524                         return -EFAULT;
1525                 if (t.qset_idx >= SGE_QSETS)
1526                         return -EINVAL;
                if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
                    !in_range(t.cong_thres, 0, 255) ||
                    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
                              MAX_TXQ_ENTRIES) ||
                    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
                              MAX_CTRL_TXQ_ENTRIES) ||
                    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
                              MAX_RX_BUFFERS) ||
                    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
                              MAX_RX_JUMBO_BUFFERS) ||
                    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
                              MAX_RSPQ_ENTRIES))
                        return -EINVAL;
1542                 if ((adapter->flags & FULL_INIT_DONE) &&
1543                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1544                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1545                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1546                         t.polling >= 0 || t.cong_thres >= 0))
1547                         return -EBUSY;
1548
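                /*
                 * A negative field means "keep the current value"; only
                 * the parameters the caller actually set are applied.
                 */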
1549                 q = &adapter->params.sge.qset[t.qset_idx];
1550
1551                 if (t.rspq_size >= 0)
1552                         q->rspq_size = t.rspq_size;
1553                 if (t.fl_size[0] >= 0)
1554                         q->fl_size = t.fl_size[0];
1555                 if (t.fl_size[1] >= 0)
1556                         q->jumbo_size = t.fl_size[1];
1557                 if (t.txq_size[0] >= 0)
1558                         q->txq_size[0] = t.txq_size[0];
1559                 if (t.txq_size[1] >= 0)
1560                         q->txq_size[1] = t.txq_size[1];
1561                 if (t.txq_size[2] >= 0)
1562                         q->txq_size[2] = t.txq_size[2];
1563                 if (t.cong_thres >= 0)
1564                         q->cong_thres = t.cong_thres;
1565                 if (t.intr_lat >= 0) {
1566                         struct sge_qset *qs =
1567                                 &adapter->sge.qs[t.qset_idx];
1568
1569                         q->coalesce_usecs = t.intr_lat;
1570                         t3_update_qset_coalesce(qs, q);
1571                 }
1572                 if (t.polling >= 0) {
1573                         if (adapter->flags & USING_MSIX)
1574                                 q->polling = t.polling;
1575                         else {
1576                                 /* No polling with INTx for T3A */
1577                                 if (adapter->params.rev == 0 &&
1578                                         !(adapter->flags & USING_MSI))
1579                                         t.polling = 0;
1580
                                for (i = 0; i < SGE_QSETS; i++) {
                                        q = &adapter->params.sge.qset[i];
                                        q->polling = t.polling;
                                }
1586                         }
1587                 }
1588                 break;
1589         }
1590         case CHELSIO_GET_QSET_PARAMS:{
1591                 struct qset_params *q;
1592                 struct ch_qset_params t;
1593
1594                 if (copy_from_user(&t, useraddr, sizeof(t)))
1595                         return -EFAULT;
1596                 if (t.qset_idx >= SGE_QSETS)
1597                         return -EINVAL;
1598
1599                 q = &adapter->params.sge.qset[t.qset_idx];
1600                 t.rspq_size = q->rspq_size;
1601                 t.txq_size[0] = q->txq_size[0];
1602                 t.txq_size[1] = q->txq_size[1];
1603                 t.txq_size[2] = q->txq_size[2];
1604                 t.fl_size[0] = q->fl_size;
1605                 t.fl_size[1] = q->jumbo_size;
1606                 t.polling = q->polling;
1607                 t.intr_lat = q->coalesce_usecs;
1608                 t.cong_thres = q->cong_thres;
1609
1610                 if (copy_to_user(useraddr, &t, sizeof(t)))
1611                         return -EFAULT;
1612                 break;
1613         }
1614         case CHELSIO_SET_QSET_NUM:{
1615                 struct ch_reg edata;
1616                 struct port_info *pi = netdev_priv(dev);
1617                 unsigned int i, first_qset = 0, other_qsets = 0;
1618
1619                 if (!capable(CAP_NET_ADMIN))
1620                         return -EPERM;
1621                 if (adapter->flags & FULL_INIT_DONE)
1622                         return -EBUSY;
1623                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1624                         return -EFAULT;
1625                 if (edata.val < 1 ||
1626                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1627                         return -EINVAL;
1628
1629                 for_each_port(adapter, i)
1630                         if (adapter->port[i] && adapter->port[i] != dev)
1631                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
1632
1633                 if (edata.val + other_qsets > SGE_QSETS)
1634                         return -EINVAL;
1635
1636                 pi->nqsets = edata.val;
1637
1638                 for_each_port(adapter, i)
1639                         if (adapter->port[i]) {
1640                                 pi = adap2pinfo(adapter, i);
1641                                 pi->first_qset = first_qset;
1642                                 first_qset += pi->nqsets;
1643                         }
1644                 break;
1645         }
1646         case CHELSIO_GET_QSET_NUM:{
1647                 struct ch_reg edata;
1648                 struct port_info *pi = netdev_priv(dev);
1649
1650                 edata.cmd = CHELSIO_GET_QSET_NUM;
1651                 edata.val = pi->nqsets;
1652                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1653                         return -EFAULT;
1654                 break;
1655         }
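        /*
         * Copy a firmware image in from user space and hand it to
         * t3_load_fw to program the adapter.
         */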
1656         case CHELSIO_LOAD_FW:{
1657                 u8 *fw_data;
1658                 struct ch_mem_range t;
1659
1660                 if (!capable(CAP_NET_ADMIN))
1661                         return -EPERM;
1662                 if (copy_from_user(&t, useraddr, sizeof(t)))
1663                         return -EFAULT;
1664
1665                 fw_data = kmalloc(t.len, GFP_KERNEL);
1666                 if (!fw_data)
1667                         return -ENOMEM;
1668
                if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
                        kfree(fw_data);
                        return -EFAULT;
                }
1674
1675                 ret = t3_load_fw(adapter, fw_data, t.len);
1676                 kfree(fw_data);
1677                 if (ret)
1678                         return ret;
1679                 break;
1680         }
1681         case CHELSIO_SETMTUTAB:{
1682                 struct ch_mtus m;
1683                 int i;
1684
1685                 if (!is_offload(adapter))
1686                         return -EOPNOTSUPP;
1687                 if (!capable(CAP_NET_ADMIN))
1688                         return -EPERM;
1689                 if (offload_running(adapter))
1690                         return -EBUSY;
1691                 if (copy_from_user(&m, useraddr, sizeof(m)))
1692                         return -EFAULT;
1693                 if (m.nmtus != NMTUS)
1694                         return -EINVAL;
1695                 if (m.mtus[0] < 81)     /* accommodate SACK */
1696                         return -EINVAL;
1697
1698                 /* MTUs must be in ascending order */
1699                 for (i = 1; i < NMTUS; ++i)
1700                         if (m.mtus[i] < m.mtus[i - 1])
1701                                 return -EINVAL;
1702
1703                 memcpy(adapter->params.mtus, m.mtus,
1704                         sizeof(adapter->params.mtus));
1705                 break;
1706         }
1707         case CHELSIO_GET_PM:{
1708                 struct tp_params *p = &adapter->params.tp;
1709                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1710
1711                 if (!is_offload(adapter))
1712                         return -EOPNOTSUPP;
1713                 m.tx_pg_sz = p->tx_pg_size;
1714                 m.tx_num_pg = p->tx_num_pgs;
1715                 m.rx_pg_sz = p->rx_pg_size;
1716                 m.rx_num_pg = p->rx_num_pgs;
1717                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1718                 if (copy_to_user(useraddr, &m, sizeof(m)))
1719                         return -EFAULT;
1720                 break;
1721         }
1722         case CHELSIO_SET_PM:{
1723                 struct ch_pm m;
1724                 struct tp_params *p = &adapter->params.tp;
1725
1726                 if (!is_offload(adapter))
1727                         return -EOPNOTSUPP;
1728                 if (!capable(CAP_NET_ADMIN))
1729                         return -EPERM;
1730                 if (adapter->flags & FULL_INIT_DONE)
1731                         return -EBUSY;
1732                 if (copy_from_user(&m, useraddr, sizeof(m)))
1733                         return -EFAULT;
                if (!m.rx_pg_sz || (m.rx_pg_sz & (m.rx_pg_sz - 1)) ||
                    !m.tx_pg_sz || (m.tx_pg_sz & (m.tx_pg_sz - 1)))
                        return -EINVAL; /* not a power of 2 */
                if (!(m.rx_pg_sz & 0x14000))
                        return -EINVAL; /* not 16KB or 64KB */
                if (!(m.tx_pg_sz & 0x1554000))
                        return -EINVAL; /* not a power of 4 from 16KB to 16MB */
1741                 if (m.tx_num_pg == -1)
1742                         m.tx_num_pg = p->tx_num_pgs;
1743                 if (m.rx_num_pg == -1)
1744                         m.rx_num_pg = p->rx_num_pgs;
1745                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1746                         return -EINVAL;
1747                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1748                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1749                         return -EINVAL;
1750                 p->rx_pg_size = m.rx_pg_sz;
1751                 p->tx_pg_size = m.tx_pg_sz;
1752                 p->rx_num_pgs = m.rx_num_pg;
1753                 p->tx_num_pgs = m.tx_num_pg;
1754                 break;
1755         }
1756         case CHELSIO_GET_MEM:{
1757                 struct ch_mem_range t;
1758                 struct mc7 *mem;
1759                 u64 buf[32];
1760
1761                 if (!is_offload(adapter))
1762                         return -EOPNOTSUPP;
1763                 if (!(adapter->flags & FULL_INIT_DONE))
1764                         return -EIO;    /* need the memory controllers */
1765                 if (copy_from_user(&t, useraddr, sizeof(t)))
1766                         return -EFAULT;
1767                 if ((t.addr & 7) || (t.len & 7))
1768                         return -EINVAL;
1769                 if (t.mem_id == MEM_CM)
1770                         mem = &adapter->cm;
1771                 else if (t.mem_id == MEM_PMRX)
1772                         mem = &adapter->pmrx;
1773                 else if (t.mem_id == MEM_PMTX)
1774                         mem = &adapter->pmtx;
1775                 else
1776                         return -EINVAL;
1777
                /*
                 * Version scheme:
                 * bits 0..9: chip version
                 * bits 10..15: chip revision
                 * e.g. a revision 2 part reports 3 | (2 << 10) = 0x803.
                 */
                t.version = 3 | (adapter->params.rev << 10);
1784                 if (copy_to_user(useraddr, &t, sizeof(t)))
1785                         return -EFAULT;
1786
1787                 /*
1788                  * Read 256 bytes at a time as len can be large and we don't
1789                  * want to use huge intermediate buffers.
1790                  */
1791                 useraddr += sizeof(t);  /* advance to start of buffer */
1792                 while (t.len) {
1793                         unsigned int chunk =
1794                                 min_t(unsigned int, t.len, sizeof(buf));
1795
                        ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
1799                         if (ret)
1800                                 return ret;
1801                         if (copy_to_user(useraddr, buf, chunk))
1802                                 return -EFAULT;
1803                         useraddr += chunk;
1804                         t.addr += chunk;
1805                         t.len -= chunk;
1806                 }
1807                 break;
1808         }
1809         case CHELSIO_SET_TRACE_FILTER:{
1810                 struct ch_trace t;
1811                 const struct trace_params *tp;
1812
1813                 if (!capable(CAP_NET_ADMIN))
1814                         return -EPERM;
1815                 if (!offload_running(adapter))
1816                         return -EAGAIN;
1817                 if (copy_from_user(&t, useraddr, sizeof(t)))
1818                         return -EFAULT;
1819
                /* ch_trace mirrors struct trace_params from ->sip onward */
                tp = (const struct trace_params *)&t.sip;
                if (t.config_tx)
                        t3_config_trace_filter(adapter, tp, 0,
                                               t.invert_match, t.trace_tx);
                if (t.config_rx)
                        t3_config_trace_filter(adapter, tp, 1,
                                               t.invert_match, t.trace_rx);
1829                 break;
1830         }
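        /*
         * Build a firmware management work request that configures one
         * of the uP's packet schedulers and send it down the offload
         * Tx path.
         */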
1831         case CHELSIO_SET_PKTSCHED:{
1832                 struct sk_buff *skb;
1833                 struct ch_pktsched_params p;
1834                 struct mngt_pktsched_wr *req;
1835
1836                 if (!(adapter->flags & FULL_INIT_DONE))
1837                         return -EIO;    /* uP must be up and running */
1838                 if (copy_from_user(&p, useraddr, sizeof(p)))
1839                         return -EFAULT;
1840                 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1841                 if (!skb)
1842                         return -ENOMEM;
                req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
1846                 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
1847                 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
1848                 req->sched = p.sched;
1849                 req->idx = p.idx;
1850                 req->min = p.min;
1851                 req->max = p.max;
1852                 req->binding = p.binding;
1853                 printk(KERN_INFO
1854                         "pktsched: sched %u idx %u min %u max %u binding %u\n",
1855                         req->sched, req->idx, req->min, req->max,
1856                         req->binding);
1857                 skb->priority = 1;
1858                 offload_tx(&adapter->tdev, skb);
1859                 break;
1860         }
1861         default:
1862                 return -EOPNOTSUPP;
1863         }
1864         return 0;
1865 }
1866
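/*
 * net_device ioctl entry point.  Handles the standard MII requests and
 * forwards SIOCCHIOCTL to the extension handler above.  For 10G PHYs
 * the MDIO MMD is carried in bits 8 and up of phy_id, so a user would
 * address, say, the PCS device as (sketch, not driver code):
 *
 *	data->phy_id = (MDIO_DEV_PCS << 8) | phy_addr;
 */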
1867 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1868 {
1869         int ret, mmd;
1870         struct adapter *adapter = dev->priv;
1871         struct port_info *pi = netdev_priv(dev);
1872         struct mii_ioctl_data *data = if_mii(req);
1873
1874         switch (cmd) {
1875         case SIOCGMIIPHY:
1876                 data->phy_id = pi->phy.addr;
1877                 /* FALLTHRU */
1878         case SIOCGMIIREG:{
1879                 u32 val;
1880                 struct cphy *phy = &pi->phy;
1881
1882                 if (!phy->mdio_read)
1883                         return -EOPNOTSUPP;
1884                 if (is_10G(adapter)) {
1885                         mmd = data->phy_id >> 8;
1886                         if (!mmd)
1887                                 mmd = MDIO_DEV_PCS;
1888                         else if (mmd > MDIO_DEV_XGXS)
1889                                 return -EINVAL;
1890
                        ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
                                             mmd, data->reg_num, &val);
                } else {
                        ret = phy->mdio_read(adapter, data->phy_id & 0x1f, 0,
                                             data->reg_num & 0x1f, &val);
                }
1899                 if (!ret)
1900                         data->val_out = val;
1901                 break;
1902         }
1903         case SIOCSMIIREG:{
1904                 struct cphy *phy = &pi->phy;
1905
1906                 if (!capable(CAP_NET_ADMIN))
1907                         return -EPERM;
1908                 if (!phy->mdio_write)
1909                         return -EOPNOTSUPP;
1910                 if (is_10G(adapter)) {
1911                         mmd = data->phy_id >> 8;
1912                         if (!mmd)
1913                                 mmd = MDIO_DEV_PCS;
1914                         else if (mmd > MDIO_DEV_XGXS)
1915                                 return -EINVAL;
1916
                        ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
                                              mmd, data->reg_num,
                                              data->val_in);
                } else {
                        ret = phy->mdio_write(adapter, data->phy_id & 0x1f, 0,
                                              data->reg_num & 0x1f,
                                              data->val_in);
                }
1928                 break;
1929         }
1930         case SIOCCHIOCTL:
1931                 return cxgb_extension_ioctl(dev, req->ifr_data);
1932         default:
1933                 return -EOPNOTSUPP;
1934         }
1935         return ret;
1936 }
1937
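/*
 * Change a port's MTU.  The 81-byte minimum leaves room for a TCP
 * header carrying a SACK option; on rev 0 adapters the offload MTU
 * table is reloaded as well, capped at port 0's MTU.
 */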
1938 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1939 {
1940         int ret;
1941         struct adapter *adapter = dev->priv;
1942         struct port_info *pi = netdev_priv(dev);
1943
1944         if (new_mtu < 81)       /* accommodate SACK */
1945                 return -EINVAL;
1946         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
1947                 return ret;
1948         dev->mtu = new_mtu;
1949         init_port_mtus(adapter);
1950         if (adapter->params.rev == 0 && offload_running(adapter))
1951                 t3_load_mtus(adapter, adapter->params.mtus,
1952                              adapter->params.a_wnd, adapter->params.b_wnd,
1953                              adapter->port[0]->mtu);
1954         return 0;
1955 }
1956
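/*
 * Set a new MAC address: program exact-match entry 0 of the port's MAC
 * and, when offload is running, rewrite the port's SMT entry so that
 * offloaded connections pick up the new address too.
 */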
1957 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
1958 {
1959         struct adapter *adapter = dev->priv;
1960         struct port_info *pi = netdev_priv(dev);
1961         struct sockaddr *addr = p;
1962
1963         if (!is_valid_ether_addr(addr->sa_data))
1964                 return -EINVAL;
1965
1966         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1967         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
1968         if (offload_running(adapter))
1969                 write_smt_entry(adapter, pi->port_id);
1970         return 0;
1971 }
1972
1973 /**
1974  * t3_synchronize_rx - wait for current Rx processing on a port to complete
1975  * @adap: the adapter
1976  * @p: the port
1977  *
1978  * Ensures that current Rx processing on any of the queues associated with
1979  * the given port completes before returning.  We do this by acquiring and
1980  * releasing the locks of the response queues associated with the port.
1981  */
1982 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
1983 {
1984         int i;
1985
1986         for (i = 0; i < p->nqsets; i++) {
1987                 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
1988
1989                 spin_lock_irq(&q->lock);
1990                 spin_unlock_irq(&q->lock);
1991         }
1992 }
1993
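/*
 * Enable or disable hardware VLAN extraction for a port.  Rev 0 (T3A)
 * parts have a single control for all ports, so acceleration stays on
 * as long as any port still has a VLAN group registered.
 */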
1994 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
1995 {
1996         struct adapter *adapter = dev->priv;
1997         struct port_info *pi = netdev_priv(dev);
1998
1999         pi->vlan_grp = grp;
2000         if (adapter->params.rev > 0)
2001                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2002         else {
2003                 /* single control for all ports */
2004                 unsigned int i, have_vlans = 0;
                for_each_port(adapter, i)
                        have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2007
2008                 t3_set_vlan_accel(adapter, 1, have_vlans);
2009         }
2010         t3_synchronize_rx(adapter, pi);
2011 }
2012
2013 static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2014 {
2015         /* nothing */
2016 }
2017
2018 #ifdef CONFIG_NET_POLL_CONTROLLER
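/* Poll a port with interrupts disabled, e.g. for netconsole. */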
2019 static void cxgb_netpoll(struct net_device *dev)
2020 {
2021         struct adapter *adapter = dev->priv;
2022         struct sge_qset *qs = dev2qset(dev);
2023
2024         t3_intr_handler(adapter, qs->rspq.polling) (adapter->pdev->irq,
2025                                                     adapter);
2026 }
2027 #endif
2028
2029 /*
2030  * Periodic accumulation of MAC statistics.
2031  */
2032 static void mac_stats_update(struct adapter *adapter)
2033 {
2034         int i;
2035
2036         for_each_port(adapter, i) {
2037                 struct net_device *dev = adapter->port[i];
2038                 struct port_info *p = netdev_priv(dev);
2039
2040                 if (netif_running(dev)) {
2041                         spin_lock(&adapter->stats_lock);
2042                         t3_mac_update_stats(&p->mac);
2043                         spin_unlock(&adapter->stats_lock);
2044                 }
2045         }
2046 }
2047
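/*
 * Poll the link state of PHYs that cannot report link changes through
 * an interrupt (those lacking SUPPORTED_IRQ in their capabilities).
 */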
2048 static void check_link_status(struct adapter *adapter)
2049 {
2050         int i;
2051
2052         for_each_port(adapter, i) {
2053                 struct net_device *dev = adapter->port[i];
2054                 struct port_info *p = netdev_priv(dev);
2055
2056                 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2057                         t3_link_changed(adapter, i);
2058         }
2059 }
2060
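/*
 * Periodic maintenance work: polls link state on interrupt-less PHYs
 * and accumulates MAC statistics.  linkpoll_period appears to be in
 * tenths of a second, given the /10 scaling against stats_update_period
 * below.
 */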
2061 static void t3_adap_check_task(struct work_struct *work)
2062 {
2063         struct adapter *adapter = container_of(work, struct adapter,
2064                                                adap_check_task.work);
2065         const struct adapter_params *p = &adapter->params;
2066
2067         adapter->check_task_cnt++;
2068
2069         /* Check link status for PHYs without interrupts */
2070         if (p->linkpoll_period)
2071                 check_link_status(adapter);
2072
2073         /* Accumulate MAC stats if needed */
2074         if (!p->linkpoll_period ||
2075             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2076             p->stats_update_period) {
2077                 mac_stats_update(adapter);
2078                 adapter->check_task_cnt = 0;
2079         }
2080
2081         /* Schedule the next check update if any port is active. */
2082         spin_lock(&adapter->work_lock);
2083         if (adapter->open_device_map & PORT_MASK)
2084                 schedule_chk_task(adapter);
2085         spin_unlock(&adapter->work_lock);
2086 }
2087
2088 /*
2089  * Processes external (PHY) interrupts in process context.
2090  */
2091 static void ext_intr_task(struct work_struct *work)
2092 {
2093         struct adapter *adapter = container_of(work, struct adapter,
2094                                                ext_intr_handler_task);
2095
2096         t3_phy_intr_handler(adapter);
2097
2098         /* Now reenable external interrupts */
2099         spin_lock_irq(&adapter->work_lock);
2100         if (adapter->slow_intr_mask) {
2101                 adapter->slow_intr_mask |= F_T3DBG;
2102                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2103                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2104                              adapter->slow_intr_mask);
2105         }
2106         spin_unlock_irq(&adapter->work_lock);
2107 }
2108
2109 /*
2110  * Interrupt-context handler for external (PHY) interrupts.
2111  */
2112 void t3_os_ext_intr_handler(struct adapter *adapter)
2113 {
2114         /*
2115          * Schedule a task to handle external interrupts as they may be slow
2116          * and we use a mutex to protect MDIO registers.  We disable PHY
2117          * interrupts in the meantime and let the task reenable them when
2118          * it's done.
2119          */
2120         spin_lock(&adapter->work_lock);
2121         if (adapter->slow_intr_mask) {
2122                 adapter->slow_intr_mask &= ~F_T3DBG;
2123                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2124                              adapter->slow_intr_mask);
2125                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2126         }
2127         spin_unlock(&adapter->work_lock);
2128 }
2129
2130 void t3_fatal_err(struct adapter *adapter)
2131 {
2132         unsigned int fw_status[4];
2133
2134         if (adapter->flags & FULL_INIT_DONE) {
2135                 t3_sge_stop(adapter);
2136                 t3_intr_disable(adapter);
2137         }
2138         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2139         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2140                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2141                          fw_status[0], fw_status[1],
                         fw_status[2], fw_status[3]);
}
2145
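/*
 * Try to switch the adapter to MSI-X: one vector per SGE queue set
 * plus one for slow-path events.  If the platform offers fewer vectors
 * we fall back to MSI/INTx rather than run with a partial set.
 */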
2146 static int __devinit cxgb_enable_msix(struct adapter *adap)
2147 {
2148         struct msix_entry entries[SGE_QSETS + 1];
2149         int i, err;
2150
2151         for (i = 0; i < ARRAY_SIZE(entries); ++i)
2152                 entries[i].entry = i;
2153
2154         err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2155         if (!err) {
2156                 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2157                         adap->msix_info[i].vec = entries[i].vector;
2158         } else if (err > 0)
2159                 dev_info(&adap->pdev->dev,
2160                        "only %d MSI-X vectors left, not using MSI-X\n", err);
2161         return err;
2162 }
2163
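/*
 * Print a one-line summary for each registered port (board and PHY
 * type, PCI mode, interrupt scheme) and the memory sizes once per
 * adapter.
 */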
2164 static void __devinit print_port_info(struct adapter *adap,
2165                                       const struct adapter_info *ai)
2166 {
2167         static const char *pci_variant[] = {
2168                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2169         };
2170
2171         int i;
2172         char buf[80];
2173
2174         if (is_pcie(adap))
2175                 snprintf(buf, sizeof(buf), "%s x%d",
2176                          pci_variant[adap->params.pci.variant],
2177                          adap->params.pci.width);
2178         else
2179                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2180                          pci_variant[adap->params.pci.variant],
2181                          adap->params.pci.speed, adap->params.pci.width);
2182
2183         for_each_port(adap, i) {
2184                 struct net_device *dev = adap->port[i];
2185                 const struct port_info *pi = netdev_priv(dev);
2186
2187                 if (!test_bit(i, &adap->registered_device_map))
2188                         continue;
2189                 printk(KERN_INFO "%s: %s %s RNIC (rev %d) %s%s\n",
2190                        dev->name, ai->desc, pi->port_type->desc,
2191                        adap->params.rev, buf,
2192                        (adap->flags & USING_MSIX) ? " MSI-X" :
2193                        (adap->flags & USING_MSI) ? " MSI" : "");
2194                 if (adap->name == dev->name && adap->params.vpd.mclk)
2195                         printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2196                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2197                                t3_mc7_size(&adap->pmtx) >> 20,
2198                                t3_mc7_size(&adap->pmrx) >> 20);
2199         }
2200 }
2201
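/*
 * PCI probe.  Maps BAR 0, allocates the adapter and one net_device per
 * port, prepares the hardware, and registers the ports; the probe
 * fails outright only if no port can be registered at all.
 */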
2202 static int __devinit init_one(struct pci_dev *pdev,
2203                               const struct pci_device_id *ent)
2204 {
2205         static int version_printed;
2206
2207         int i, err, pci_using_dac = 0;
2208         unsigned long mmio_start, mmio_len;
2209         const struct adapter_info *ai;
2210         struct adapter *adapter = NULL;
2211         struct port_info *pi;
2212
2213         if (!version_printed) {
2214                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2215                 ++version_printed;
2216         }
2217
2218         if (!cxgb3_wq) {
2219                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2220                 if (!cxgb3_wq) {
2221                         printk(KERN_ERR DRV_NAME
2222                                ": cannot initialize work queue\n");
2223                         return -ENOMEM;
2224                 }
2225         }
2226
2227         err = pci_request_regions(pdev, DRV_NAME);
2228         if (err) {
2229                 /* Just info, some other driver may have claimed the device. */
2230                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2231                 return err;
2232         }
2233
2234         err = pci_enable_device(pdev);
2235         if (err) {
2236                 dev_err(&pdev->dev, "cannot enable PCI device\n");
2237                 goto out_release_regions;
2238         }
2239
2240         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2241                 pci_using_dac = 1;
2242                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2243                 if (err) {
2244                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2245                                "coherent allocations\n");
2246                         goto out_disable_device;
2247                 }
2248         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2249                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2250                 goto out_disable_device;
2251         }
2252
2253         pci_set_master(pdev);
2254
2255         mmio_start = pci_resource_start(pdev, 0);
2256         mmio_len = pci_resource_len(pdev, 0);
2257         ai = t3_get_adapter_info(ent->driver_data);
2258
2259         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2260         if (!adapter) {
2261                 err = -ENOMEM;
2262                 goto out_disable_device;
2263         }
2264
2265         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2266         if (!adapter->regs) {
2267                 dev_err(&pdev->dev, "cannot map device registers\n");
2268                 err = -ENOMEM;
2269                 goto out_free_adapter;
2270         }
2271
2272         adapter->pdev = pdev;
2273         adapter->name = pci_name(pdev);
2274         adapter->msg_enable = dflt_msg_enable;
2275         adapter->mmio_len = mmio_len;
2276
2277         mutex_init(&adapter->mdio_lock);
2278         spin_lock_init(&adapter->work_lock);
2279         spin_lock_init(&adapter->stats_lock);
2280
2281         INIT_LIST_HEAD(&adapter->adapter_list);
2282         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2283         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2284
2285         for (i = 0; i < ai->nports; ++i) {
2286                 struct net_device *netdev;
2287
2288                 netdev = alloc_etherdev(sizeof(struct port_info));
2289                 if (!netdev) {
2290                         err = -ENOMEM;
2291                         goto out_free_dev;
2292                 }
2293
2294                 SET_MODULE_OWNER(netdev);
2295                 SET_NETDEV_DEV(netdev, &pdev->dev);
2296
2297                 adapter->port[i] = netdev;
2298                 pi = netdev_priv(netdev);
2299                 pi->rx_csum_offload = 1;
2300                 pi->nqsets = 1;
2301                 pi->first_qset = i;
2302                 pi->activity = 0;
2303                 pi->port_id = i;
2304                 netif_carrier_off(netdev);
2305                 netdev->irq = pdev->irq;
2306                 netdev->mem_start = mmio_start;
2307                 netdev->mem_end = mmio_start + mmio_len - 1;
2308                 netdev->priv = adapter;
2309                 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2310                 netdev->features |= NETIF_F_LLTX;
2311                 if (pci_using_dac)
2312                         netdev->features |= NETIF_F_HIGHDMA;
2313
2314                 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2315                 netdev->vlan_rx_register = vlan_rx_register;
2316                 netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
2317
2318                 netdev->open = cxgb_open;
2319                 netdev->stop = cxgb_close;
2320                 netdev->hard_start_xmit = t3_eth_xmit;
2321                 netdev->get_stats = cxgb_get_stats;
2322                 netdev->set_multicast_list = cxgb_set_rxmode;
2323                 netdev->do_ioctl = cxgb_ioctl;
2324                 netdev->change_mtu = cxgb_change_mtu;
2325                 netdev->set_mac_address = cxgb_set_mac_addr;
2326 #ifdef CONFIG_NET_POLL_CONTROLLER
2327                 netdev->poll_controller = cxgb_netpoll;
2328 #endif
2329                 netdev->weight = 64;
2330
2331                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2332         }
2333
2334         pci_set_drvdata(pdev, adapter->port[0]);
2335         if (t3_prep_adapter(adapter, ai, 1) < 0) {
2336                 err = -ENODEV;
2337                 goto out_free_dev;
2338         }
2339
2340         /*
2341          * The card is now ready to go.  If any errors occur during device
2342          * registration we do not fail the whole card but rather proceed only
2343          * with the ports we manage to register successfully.  However we must
2344          * register at least one net device.
2345          */
2346         for_each_port(adapter, i) {
2347                 err = register_netdev(adapter->port[i]);
2348                 if (err)
2349                         dev_warn(&pdev->dev,
2350                                  "cannot register net device %s, skipping\n",
2351                                  adapter->port[i]->name);
2352                 else {
2353                         /*
2354                          * Change the name we use for messages to the name of
2355                          * the first successfully registered interface.
2356                          */
2357                         if (!adapter->registered_device_map)
2358                                 adapter->name = adapter->port[i]->name;
2359
2360                         __set_bit(i, &adapter->registered_device_map);
2361                 }
2362         }
2363         if (!adapter->registered_device_map) {
2364                 dev_err(&pdev->dev, "could not register any net devices\n");
2365                 goto out_free_dev;
2366         }
2367
2368         /* Driver's ready. Reflect it on LEDs */
2369         t3_led_ready(adapter);
2370
2371         if (is_offload(adapter)) {
2372                 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2373                 cxgb3_adapter_ofld(adapter);
2374         }
2375
2376         /* See what interrupts we'll be using */
2377         if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2378                 adapter->flags |= USING_MSIX;
2379         else if (msi > 0 && pci_enable_msi(pdev) == 0)
2380                 adapter->flags |= USING_MSI;
2381
2382         err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
2383                                  &cxgb3_attr_group);
2384
2385         print_port_info(adapter, ai);
2386         return 0;
2387
2388 out_free_dev:
2389         iounmap(adapter->regs);
2390         for (i = ai->nports - 1; i >= 0; --i)
2391                 if (adapter->port[i])
2392                         free_netdev(adapter->port[i]);
2393
2394 out_free_adapter:
2395         kfree(adapter);
2396
2397 out_disable_device:
2398         pci_disable_device(pdev);
2399 out_release_regions:
2400         pci_release_regions(pdev);
2401         pci_set_drvdata(pdev, NULL);
2402         return err;
2403 }
2404
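/*
 * PCI remove: tear everything down in roughly the reverse order of
 * init_one.
 */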
2405 static void __devexit remove_one(struct pci_dev *pdev)
2406 {
2407         struct net_device *dev = pci_get_drvdata(pdev);
2408
2409         if (dev) {
2410                 int i;
2411                 struct adapter *adapter = dev->priv;
2412
2413                 t3_sge_stop(adapter);
2414                 sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
2415                                    &cxgb3_attr_group);
2416
                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);
2420
2421                 if (is_offload(adapter)) {
2422                         cxgb3_adapter_unofld(adapter);
2423                         if (test_bit(OFFLOAD_DEVMAP_BIT,
2424                                      &adapter->open_device_map))
2425                                 offload_close(&adapter->tdev);
2426                 }
2427
2428                 t3_free_sge_resources(adapter);
2429                 cxgb_disable_msi(adapter);
2430
2431                 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2432                         if (adapter->dummy_netdev[i]) {
2433                                 free_netdev(adapter->dummy_netdev[i]);
2434                                 adapter->dummy_netdev[i] = NULL;
2435                         }
2436
2437                 for_each_port(adapter, i)
2438                         if (adapter->port[i])
2439                                 free_netdev(adapter->port[i]);
2440
2441                 iounmap(adapter->regs);
2442                 kfree(adapter);
2443                 pci_release_regions(pdev);
2444                 pci_disable_device(pdev);
2445                 pci_set_drvdata(pdev, NULL);
2446         }
2447 }
2448
2449 static struct pci_driver driver = {
2450         .name = DRV_NAME,
2451         .id_table = cxgb3_pci_tbl,
2452         .probe = init_one,
2453         .remove = __devexit_p(remove_one),
2454 };
2455
2456 static int __init cxgb3_init_module(void)
2457 {
        cxgb3_offload_init();

        return pci_register_driver(&driver);
2464 }
2465
2466 static void __exit cxgb3_cleanup_module(void)
2467 {
2468         pci_unregister_driver(&driver);
2469         if (cxgb3_wq)
2470                 destroy_workqueue(cxgb3_wq);
2471 }
2472
2473 module_init(cxgb3_init_module);
2474 module_exit(cxgb3_cleanup_module);