cxgb3: set hard_xmit in the netdev_ops
drivers/net/cxgb3/cxgb3_main.c
/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

enum {
        MAX_TXQ_ENTRIES = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES = 16384,
        MAX_RX_BUFFERS = 16384,
        MAX_RX_JUMBO_BUFFERS = 16384,
        MIN_TXQ_ENTRIES = 4,
        MIN_CTRL_TXQ_ENTRIES = 4,
        MIN_RSPQ_ENTRIES = 32,
        MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
        { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }

static const struct pci_device_id cxgb3_pci_tbl[] = {
        CH_DEVICE(0x20, 0),     /* PE9000 */
        CH_DEVICE(0x21, 1),     /* T302E */
        CH_DEVICE(0x22, 2),     /* T310E */
        CH_DEVICE(0x23, 3),     /* T320X */
        CH_DEVICE(0x24, 1),     /* T302X */
        CH_DEVICE(0x25, 3),     /* T320E */
        CH_DEVICE(0x26, 2),     /* T310X */
        CH_DEVICE(0x30, 2),     /* T3B10 */
        CH_DEVICE(0x31, 3),     /* T3B20 */
        CH_DEVICE(0x32, 1),     /* T3B02 */
        {0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
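
/*
 * Example (illustrative, not part of the original source): loading the
 * module with "modprobe cxgb3 msi=0" would force legacy pin interrupts,
 * msi=1 permits MSI but not MSI-X, and the default msi=2 lets the driver
 * try MSI-X first, then MSI, then pin interrupts.
 */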

/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */

static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;

/**
 *      link_report - show link status and link speed/duplex
 *      @dev: the net device whose link status is to be reported
 *
 *      Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                printk(KERN_INFO "%s: link down\n", dev->name);
        else {
                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_config.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
                       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
        }
}

/**
 *      t3_os_link_changed - handle link status changes
 *      @adapter: the adapter associated with the link change
 *      @port_id: the port index whose link status has changed
 *      @link_stat: the new status of the link
 *      @speed: the new speed setting
 *      @duplex: the new duplex setting
 *      @pause: the new flow-control setting
 *
 *      This is the OS-dependent handler for link status changes.  The OS
 *      neutral handler takes care of most of the processing for these events,
 *      then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
                        int speed, int duplex, int pause)
{
        struct net_device *dev = adapter->port[port_id];
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        /* Skip changes from disabled ports. */
        if (!netif_running(dev))
                return;

        if (link_stat != netif_carrier_ok(dev)) {
                if (link_stat) {
                        t3_mac_enable(mac, MAC_DIRECTION_RX);
                        netif_carrier_on(dev);
                } else {
                        netif_carrier_off(dev);
                        pi->phy.ops->power_down(&pi->phy, 1);
                        t3_mac_disable(mac, MAC_DIRECTION_RX);
                        t3_link_start(&pi->phy, mac, &pi->link_config);
                }

                link_report(dev);
        }
}

/**
 *      t3_os_phymod_changed - handle PHY module changes
 *      @adap: the adapter associated with the module change
 *      @port_id: the port index whose PHY module changed
 *
 *      This is the OS-dependent handler for PHY module changes.  It is
 *      invoked when a PHY module is removed or inserted for any OS-specific
 *      processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->phy.modtype == phy_modtype_none)
                printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
        else
                printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
                       mod_str[pi->phy.modtype]);
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *      link_start - enable a port
 *      @dev: the device to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
        struct t3_rx_mode rm;
        struct port_info *pi = netdev_priv(dev);
        struct cmac *mac = &pi->mac;

        init_rx_mode(&rm, dev, dev->mc_list);
        t3_mac_reset(mac);
        t3_mac_set_mtu(mac, dev->mtu);
        t3_mac_set_address(mac, 0, dev->dev_addr);
        t3_mac_set_rx_mode(mac, &rm);
        t3_link_start(&pi->phy, mac, &pi->link_config);
        t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}

static inline void cxgb_disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
        t3_slow_intr_handler(cookie);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

        snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
        adap->msix_info[0].desc[n] = 0;

        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++) {
                        snprintf(adap->msix_info[msi_idx].desc, n,
                                 "%s-%d", d->name, pi->first_qset + i);
                        adap->msix_info[msi_idx].desc[n] = 0;
                }
        }
}
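
/*
 * Example (illustrative): on a two-port adapter named eth0/eth1 with two
 * qsets per port, vector 0 (async events) takes the adapter name and the
 * data vectors are named "eth0-0", "eth0-1", "eth1-2", "eth1-3"; the
 * suffix is the absolute qset index, pi->first_qset + i.
 */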

static int request_msix_data_irqs(struct adapter *adap)
{
        int i, j, err, qidx = 0;

        for_each_port(adap, i) {
                int nqsets = adap2pinfo(adap, i)->nqsets;

                for (j = 0; j < nqsets; ++j) {
                        err = request_irq(adap->msix_info[qidx + 1].vec,
                                          t3_intr_handler(adap,
                                                          adap->sge.qs[qidx].
                                                          rspq.polling), 0,
                                          adap->msix_info[qidx + 1].desc,
                                          &adap->sge.qs[qidx]);
                        if (err) {
                                while (--qidx >= 0)
                                        free_irq(adap->msix_info[qidx + 1].vec,
                                                 &adap->sge.qs[qidx]);
                                return err;
                        }
                        qidx++;
                }
        }
        return 0;
}

static void free_irq_resources(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                int i, n = 0;

                free_irq(adapter->msix_info[0].vec, adapter);
                for_each_port(adapter, i)
                    n += adap2pinfo(adapter, i)->nqsets;

                for (i = 0; i < n; ++i)
                        free_irq(adapter->msix_info[i + 1].vec,
                                 &adapter->sge.qs[i]);
        } else
                free_irq(adapter->pdev->irq, adapter);
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
                              unsigned long n)
{
        int attempts = 5;

        while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
                if (!--attempts)
                        return -ETIMEDOUT;
                msleep(10);
        }
        return 0;
}
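
/*
 * Note: with 5 attempts and a 10 ms sleep per attempt, the wait above
 * gives up after roughly 50 ms if the expected number of management
 * replies has not been observed on response queue 0.
 */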

static int init_tp_parity(struct adapter *adap)
{
        int i;
        struct sk_buff *skb;
        struct cpl_set_tcb_field *greq;
        unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

        t3_tp_set_offload_mode(adap, 1);

        for (i = 0; i < 16; i++) {
                struct cpl_smt_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
                req->iff = i;
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_l2t_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
                req->params = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        for (i = 0; i < 2048; i++) {
                struct cpl_rte_write_req *req;

                skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
                req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
                memset(req, 0, sizeof(*req));
                req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
                OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
                req->l2t_idx = htonl(V_L2T_W_IDX(i));
                t3_mgmt_tx(adap, skb);
        }

        skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
        greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
        memset(greq, 0, sizeof(*greq));
        greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
        greq->mask = cpu_to_be64(1);
        t3_mgmt_tx(adap, skb);

        i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
        t3_tp_set_offload_mode(adap, 0);
        return i;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS to distribute packets to multiple receive queues.  We
 *      configure the RSS CPU lookup table to distribute to the number of HW
 *      receive queues, and the response queue lookup table to narrow that
 *      down to the response queues actually configured for each port.
 *      We always configure the RSS mapping for two ports since the mapping
 *      table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
        int i;
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
        u16 rspq_map[RSS_TABLE_SIZE];

        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
        cpus[SGE_QSETS] = 0xff; /* terminator */

        for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }

        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
                      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
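
/*
 * Example (illustrative): with nq0 = 4 qsets on port 0 and nq1 = 2 on
 * port 1, the first half of rspq_map cycles 0,1,2,3,0,1,... and the
 * second half cycles 4,5,4,5,..., so each port's RSS hash values land
 * only on that port's own response queues.
 */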

static void init_napi(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++) {
                struct sge_qset *qs = &adap->sge.qs[i];

                if (qs->adap)
                        netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
                                       64);
        }

        /*
         * netif_napi_add() can be called only once per napi_struct because it
         * adds each new napi_struct to a list.  Be careful not to call it a
         * second time, e.g., during EEH recovery, by making a note of it.
         */
        adap->flags |= NAPI_INIT;
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
        int i;
        for (i = 0; i < SGE_QSETS; i++)
                if (adap->sge.qs[i].adap)
                        napi_enable(&adap->sge.qs[i].napi);
}

/**
 *      set_qset_lro - turn a queue set's LRO capability on and off
 *      @dev: the device the qset is attached to
 *      @qset_idx: the queue set index
 *      @val: the LRO switch
 *
 *      Sets LRO on or off for a particular queue set.  The device's
 *      features flag is updated to reflect the LRO capability when all
 *      queues belonging to the device are in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int i, lro_on = 1;

        adapter->params.sge.qset[qset_idx].lro = !!val;
        adapter->sge.qs[qset_idx].lro_enabled = !!val;

        /* let ethtool report LRO on only if all queues are LRO enabled */
        for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
                lro_on &= adapter->params.sge.qset[i].lro;

        if (lro_on)
                dev->features |= NETIF_F_LRO;
        else
                dev->features &= ~NETIF_F_LRO;
}

/**
 *      setup_sge_qsets - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
        int i, j, err, irq_idx = 0, qset_idx = 0;
        unsigned int ntxq = SGE_TXQ_PER_SET;

        if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
                irq_idx = -1;

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);

                pi->qs = &adap->sge.qs[pi->first_qset];
                for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
                     ++j, ++qset_idx) {
                        set_qset_lro(dev, qset_idx, pi->rx_csum_offload);
                        err = t3_sge_alloc_qset(adap, qset_idx, 1,
                                (adap->flags & USING_MSIX) ? qset_idx + 1 :
                                                             irq_idx,
                                &adap->params.sge.qset[qset_idx], ntxq, dev);
                        if (err) {
                                t3_stop_sge_timers(adap);
                                t3_free_sge_resources(adap);
                                return err;
                        }
                }
        }

        return 0;
}

static ssize_t attr_show(struct device *d, char *buf,
                         ssize_t(*format) (struct net_device *, char *))
{
        ssize_t len;

        /* Synchronize with ioctls that may shut down the device */
        rtnl_lock();
        len = (*format) (to_net_dev(d), buf);
        rtnl_unlock();
        return len;
}

static ssize_t attr_store(struct device *d,
                          const char *buf, size_t len,
                          ssize_t(*set) (struct net_device *, unsigned int),
                          unsigned int min_val, unsigned int max_val)
{
        char *endp;
        ssize_t ret;
        unsigned int val;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val < min_val || val > max_val)
                return -EINVAL;

        rtnl_lock();
        ret = (*set) (to_net_dev(d), val);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
        struct port_info *pi = netdev_priv(dev); \
        struct adapter *adap = pi->adapter; \
        return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return attr_show(d, buf, format_##name); \
}

static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val && adap->params.rev == 0)
                return -EINVAL;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
            min_tids)
                return -EINVAL;
        adap->params.mc5.nfilters = val;
        return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & FULL_INIT_DONE)
                return -EBUSY;
        if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
            MC5_MIN_TIDS)
                return -EINVAL;
        adap->params.mc5.nservers = val;
        return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
                              const char *buf, size_t len)
{
        return attr_store(d, buf, len, set_nservers, 0, ~0);
}

#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
        &dev_attr_cam_size.attr,
        &dev_attr_nfilters.attr,
        &dev_attr_nservers.attr,
        NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };

static ssize_t tm_attr_show(struct device *d,
                            char *buf, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int v, addr, bpt, cpt;
        ssize_t len;

        addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
        rtnl_lock();
        t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
        v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
        if (sched & 1)
                v >>= 16;
        bpt = (v >> 8) & 0xff;
        cpt = v & 0xff;
        if (!cpt)
                len = sprintf(buf, "disabled\n");
        else {
                v = (adap->params.vpd.cclk * 1000) / cpt;
                len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
        }
        rtnl_unlock();
        return len;
}
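
/*
 * Worked example (illustrative values): the scheduler sends bpt bytes
 * every cpt core-clock ticks.  With a hypothetical 200 MHz core clock
 * (cclk = 200000 kHz), cpt = 200 and bpt = 64:
 * (200000 * 1000) / 200 = 1000000 ticks/s, 1000000 * 64 = 64000000
 * bytes/s, and 64000000 / 125 = 512000 Kbps (dividing bytes/s by 125
 * converts to kilobits/s).
 */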

static ssize_t tm_attr_store(struct device *d,
                             const char *buf, size_t len, int sched)
{
        struct port_info *pi = netdev_priv(to_net_dev(d));
        struct adapter *adap = pi->adapter;
        unsigned int val;
        char *endp;
        ssize_t ret;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        val = simple_strtoul(buf, &endp, 0);
        if (endp == buf || val > 10000000)
                return -EINVAL;

        rtnl_lock();
        ret = t3_config_sched(adap, val, sched);
        if (!ret)
                ret = len;
        rtnl_unlock();
        return ret;
}

#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
                           char *buf) \
{ \
        return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
                            const char *buf, size_t len) \
{ \
        return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);

static struct attribute *offload_attrs[] = {
        &dev_attr_sched0.attr,
        &dev_attr_sched1.attr,
        &dev_attr_sched2.attr,
        &dev_attr_sched3.attr,
        &dev_attr_sched4.attr,
        &dev_attr_sched5.attr,
        &dev_attr_sched6.attr,
        &dev_attr_sched7.attr,
        NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };

/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
        int ret;

        local_bh_disable();
        ret = t3_offload_tx(tdev, skb);
        local_bh_enable();
        return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
        struct cpl_smt_write_req *req;
        struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

        if (!skb)
                return -ENOMEM;

        req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
        req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
        req->iff = idx;
        memset(req->src_mac1, 0, sizeof(req->src_mac1));
        memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
        skb->priority = 1;
        offload_tx(&adapter->tdev, skb);
        return 0;
}

static int init_smt(struct adapter *adapter)
{
        int i;

        for_each_port(adapter, i)
            write_smt_entry(adapter, i);
        return 0;
}

static void init_port_mtus(struct adapter *adapter)
{
        unsigned int mtus = adapter->port[0]->mtu;

        if (adapter->port[1])
                mtus |= adapter->port[1]->mtu << 16;
        t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}
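
/*
 * Example (illustrative): with both ports at the default MTU of 1500
 * (0x5dc), the packed per-port MTU register above is written as
 * 0x05dc05dc (port 0 in the low 16 bits, port 1 in the high 16 bits).
 */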

static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
                              int hi, int port)
{
        struct sk_buff *skb;
        struct mngt_pktsched_wr *req;
        int ret;

        skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
        req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
        req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
        req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
        req->sched = sched;
        req->idx = qidx;
        req->min = lo;
        req->max = hi;
        req->binding = port;
        ret = t3_mgmt_tx(adap, skb);

        return ret;
}

static int bind_qsets(struct adapter *adap)
{
        int i, j, err = 0;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                for (j = 0; j < pi->nqsets; ++j) {
                        int ret = send_pktsched_cmd(adap, 1,
                                                    pi->first_qset + j, -1,
                                                    -1, i);
                        if (ret)
                                err = ret;
                }
        }

        return err;
}

#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"

static int upgrade_fw(struct adapter *adap)
{
        int ret;
        char buf[64];
        const struct firmware *fw;
        struct device *dev = &adap->pdev->dev;

        snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
                 FW_VERSION_MINOR, FW_VERSION_MICRO);
        ret = request_firmware(&fw, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not upgrade firmware: unable to load %s\n",
                        buf);
                return ret;
        }
        ret = t3_load_fw(adap, fw->data, fw->size);
        release_firmware(fw);

        if (ret == 0)
                dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
        else
                dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
                        FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

        return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
        char rev = 0;

        switch(adapter->params.rev) {
        case T3_REV_B:
        case T3_REV_B2:
                rev = 'b';
                break;
        case T3_REV_C:
                rev = 'c';
                break;
        }
        return rev;
}

static int update_tpsram(struct adapter *adap)
{
        const struct firmware *tpsram;
        char buf[64];
        struct device *dev = &adap->pdev->dev;
        int ret;
        char rev;

        rev = t3rev2char(adap);
        if (!rev)
                return 0;

        snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
                 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

        ret = request_firmware(&tpsram, buf, dev);
        if (ret < 0) {
                dev_err(dev, "could not load TP SRAM: unable to load %s\n",
                        buf);
                return ret;
        }

        ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
        if (ret)
                goto release_tpsram;

        ret = t3_set_proto_sram(adap, tpsram->data);
        if (ret == 0)
                dev_info(dev,
                         "successful update of protocol engine "
                         "to %d.%d.%d\n",
                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        else
                dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
                        TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
        if (ret)
                dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
        release_firmware(tpsram);

        return ret;
}

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;
        int must_load;

        if (!(adap->flags & FULL_INIT_DONE)) {
                err = t3_check_fw_version(adap, &must_load);
                if (err == -EINVAL) {
                        err = upgrade_fw(adap);
                        if (err && must_load)
                                goto out;
                }

                err = t3_check_tpsram_version(adap, &must_load);
                if (err == -EINVAL) {
                        err = update_tpsram(adap);
                        if (err && must_load)
                                goto out;
                }

                /*
                 * Clear interrupts now to catch errors if t3_init_hw fails.
                 * We clear them again later as initialization may trigger
                 * conditions that can interrupt.
                 */
                t3_intr_clear(adap);

                err = t3_init_hw(adap, 0);
                if (err)
                        goto out;

                t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
                t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

                err = setup_sge_qsets(adap);
                if (err)
                        goto out;

                setup_rss(adap);
                if (!(adap->flags & NAPI_INIT))
                        init_napi(adap);
                adap->flags |= FULL_INIT_DONE;
        }

        t3_intr_clear(adap);

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec,
                                  t3_async_intr_handler, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_data_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else if ((err = request_irq(adap->pdev->irq,
                                      t3_intr_handler(adap,
                                                      adap->sge.qs[0].rspq.
                                                      polling),
                                      (adap->flags & USING_MSI) ?
                                       0 : IRQF_SHARED,
                                      adap->name, adap)))
                goto irq_err;

        enable_all_napi(adap);
        t3_sge_start(adap);
        t3_intr_enable(adap);

        if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
            is_offload(adap) && init_tp_parity(adap) == 0)
                adap->flags |= TP_PARITY_INIT;

        if (adap->flags & TP_PARITY_INIT) {
                t3_write_reg(adap, A_TP_INT_CAUSE,
                             F_CMCACHEPERR | F_ARPLUTPERR);
                t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
        }

        if (!(adap->flags & QUEUES_BOUND)) {
                err = bind_qsets(adap);
                if (err) {
                        CH_ERR(adap, "failed to bind qsets, err %d\n", err);
                        t3_intr_disable(adap);
                        free_irq_resources(adap);
                        goto out;
                }
                adap->flags |= QUEUES_BOUND;
        }

out:
        return err;
irq_err:
        CH_ERR(adap, "request_irq failed, err %d\n", err);
        goto out;
}

/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
        t3_sge_stop(adapter);
        spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
        t3_intr_disable(adapter);
        spin_unlock_irq(&adapter->work_lock);

        free_irq_resources(adapter);
        flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
        quiesce_rx(adapter);
}

static void schedule_chk_task(struct adapter *adap)
{
        unsigned int timeo;

        timeo = adap->params.linkpoll_period ?
            (HZ * adap->params.linkpoll_period) / 10 :
            adap->params.stats_update_period * HZ;
        if (timeo)
                queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
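
/*
 * Note: as used above, params.linkpoll_period is in units of 0.1 s
 * (HZ * period / 10 jiffies) while params.stats_update_period is in
 * whole seconds; a zero period disables the periodic check task.
 */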

static int offload_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct t3cdev *tdev = dev2t3cdev(dev);
        int adap_up = adapter->open_device_map & PORT_MASK;
        int err;

        if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        if (!adap_up && (err = cxgb_up(adapter)) < 0)
                goto out;

        t3_tp_set_offload_mode(adapter, 1);
        tdev->lldev = adapter->port[0];
        err = cxgb3_offload_activate(adapter);
        if (err)
                goto out;

        init_port_mtus(adapter);
        t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
                     adapter->params.b_wnd,
                     adapter->params.rev == 0 ?
                     adapter->port[0]->mtu : 0xffff);
        init_smt(adapter);

        if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
                dev_dbg(&dev->dev, "cannot create sysfs group\n");

        /* Call back all registered clients */
        cxgb3_add_clients(tdev);

out:
        /* restore them in case the offload module has changed them */
        if (err) {
                t3_tp_set_offload_mode(adapter, 0);
                clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
                cxgb3_set_dummy_ops(tdev);
        }
        return err;
}

static int offload_close(struct t3cdev *tdev)
{
        struct adapter *adapter = tdev2adap(tdev);

        if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
                return 0;

        /* Call back all registered clients */
        cxgb3_remove_clients(tdev);

        sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

        tdev->lldev = NULL;
        cxgb3_set_dummy_ops(tdev);
        t3_tp_set_offload_mode(adapter, 0);
        clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        cxgb3_offload_deactivate(adapter);
        return 0;
}

static int cxgb_open(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int other_ports = adapter->open_device_map & PORT_MASK;
        int err;

        if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
                return err;

        set_bit(pi->port_id, &adapter->open_device_map);
        if (is_offload(adapter) && !ofld_disable) {
                err = offload_open(dev);
                if (err)
                        printk(KERN_WARNING
                               "Could not initialize offload capabilities\n");
        }

        link_start(dev);
        t3_port_intr_enable(adapter, pi->port_id);
        netif_start_queue(dev);
        if (!other_ports)
                schedule_chk_task(adapter);

        return 0;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        t3_port_intr_disable(adapter, pi->port_id);
        netif_stop_queue(dev);
        pi->phy.ops->power_down(&pi->phy, 1);
        netif_carrier_off(dev);
        t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

        spin_lock_irq(&adapter->work_lock);     /* sync with update task */
        clear_bit(pi->port_id, &adapter->open_device_map);
        spin_unlock_irq(&adapter->work_lock);

        if (!(adapter->open_device_map & PORT_MASK))
                cancel_rearming_delayed_workqueue(cxgb3_wq,
                                                  &adapter->adap_check_task);

        if (!adapter->open_device_map)
                cxgb_down(adapter);

        return 0;
}

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &pi->netstats;
        const struct mac_stats *pstats;

        spin_lock(&adapter->stats_lock);
        pstats = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes = pstats->tx_octets;
        ns->tx_packets = pstats->tx_frames;
        ns->rx_bytes = pstats->rx_octets;
        ns->rx_packets = pstats->rx_frames;
        ns->multicast = pstats->rx_mcast_frames;

        ns->tx_errors = pstats->tx_underrun;
        ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
            pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
            pstats->rx_fifo_ovfl;

        /* detailed rx_errors */
        ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
        ns->rx_over_errors = 0;
        ns->rx_crc_errors = pstats->rx_fcs_errs;
        ns->rx_frame_errors = pstats->rx_symbol_errs;
        ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
        ns->rx_missed_errors = pstats->rx_cong_drops;

        /* detailed tx_errors */
        ns->tx_aborted_errors = 0;
        ns->tx_carrier_errors = 0;
        ns->tx_fifo_errors = pstats->tx_underrun;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors = 0;
        return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        adapter->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxMulticastFramesOK",
        "TxBroadcastFramesOK",
        "TxPauseFrames      ",
        "TxUnderrun         ",
        "TxExtUnderrun      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxMulticastFramesOK",
        "RxBroadcastFramesOK",
        "RxPauseFrames      ",
        "RxFCSErrors        ",
        "RxSymbolErrors     ",
        "RxShortErrors      ",
        "RxJabberErrors     ",
        "RxLengthErrors     ",
        "RxFIFOoverflow     ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "PhyFIFOErrors      ",
        "TSO                ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "LroAggregated      ",
        "LroFlushed         ",
        "LroNoDesc          ",
        "RxDrops            ",

        "CheckTXEnToggled   ",
        "CheckResets        ",

};
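
/*
 * Note: get_stats() below must write its values in exactly the order of
 * this string table; the two are kept in lockstep by hand.
 */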

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
        return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 fw_vers = 0;
        u32 tp_vers = 0;

        spin_lock(&adapter->stats_lock);
        t3_get_fw_version(adapter, &fw_vers);
        t3_get_tp_version(adapter, &tp_vers);
        spin_unlock(&adapter->stats_lock);

        strcpy(info->driver, DRV_NAME);
        strcpy(info->version, DRV_VERSION);
        strcpy(info->bus_info, pci_name(adapter->pdev));
        if (!fw_vers)
                strcpy(info->fw_version, "N/A");
        else {
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%s %u.%u.%u TP %u.%u.%u",
                         G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
                         G_FW_VERSION_MAJOR(fw_vers),
                         G_FW_VERSION_MINOR(fw_vers),
                         G_FW_VERSION_MICRO(fw_vers),
                         G_TP_VERSION_MAJOR(tp_vers),
                         G_TP_VERSION_MINOR(tp_vers),
                         G_TP_VERSION_MICRO(tp_vers));
        }
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
                                            struct port_info *p, int idx)
{
        int i;
        unsigned long tot = 0;

        for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
                tot += adapter->sge.qs[i].port_stats[idx];
        return tot;
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        const struct mac_stats *s;

        spin_lock(&adapter->stats_lock);
        s = t3_mac_update_stats(&pi->mac);
        spin_unlock(&adapter->stats_lock);

        *data++ = s->tx_octets;
        *data++ = s->tx_frames;
        *data++ = s->tx_mcast_frames;
        *data++ = s->tx_bcast_frames;
        *data++ = s->tx_pause;
        *data++ = s->tx_underrun;
        *data++ = s->tx_fifo_urun;

        *data++ = s->tx_frames_64;
        *data++ = s->tx_frames_65_127;
        *data++ = s->tx_frames_128_255;
        *data++ = s->tx_frames_256_511;
        *data++ = s->tx_frames_512_1023;
        *data++ = s->tx_frames_1024_1518;
        *data++ = s->tx_frames_1519_max;

        *data++ = s->rx_octets;
        *data++ = s->rx_frames;
        *data++ = s->rx_mcast_frames;
        *data++ = s->rx_bcast_frames;
        *data++ = s->rx_pause;
        *data++ = s->rx_fcs_errs;
        *data++ = s->rx_symbol_errs;
        *data++ = s->rx_short;
        *data++ = s->rx_jabber;
        *data++ = s->rx_too_long;
        *data++ = s->rx_fifo_ovfl;

        *data++ = s->rx_frames_64;
        *data++ = s->rx_frames_65_127;
        *data++ = s->rx_frames_128_255;
        *data++ = s->rx_frames_256_511;
        *data++ = s->rx_frames_512_1023;
        *data++ = s->rx_frames_1024_1518;
        *data++ = s->rx_frames_1519_max;

        *data++ = pi->phy.fifo_errors;

        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
        *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
        *data++ = s->rx_cong_drops;

        *data++ = s->num_toggled;
        *data++ = s->num_resets;
}

static inline void reg_block_dump(struct adapter *ap, void *buf,
                                  unsigned int start, unsigned int end)
{
        u32 *p = buf + start;

        for (; start <= end; start += sizeof(u32))
                *p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *ap = pi->adapter;

        /*
         * Version scheme:
         * bits 0..9: chip version
         * bits 10..15: chip revision
         * bit 31: set for PCIe cards
         */
        regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
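
        /*
         * Example (illustrative): a PCIe card whose params.rev happened
         * to be 4 would report version 0x80001003 (bit 31 for PCIe,
         * 4 << 10 for the revision, chip version 3 in the low bits).
         */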

        /*
         * We skip the MAC statistics registers because they are clear-on-read.
         * Also reading multi-register stats would need to synchronize with the
         * periodic mac stats accumulation.  Hard to justify the complexity.
         */
        memset(buf, 0, T3_REGMAP_SIZE);
        reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
        reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
        reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
        reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
        reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
        reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
                       XGM_REG(A_XGM_SERDES_STAT3, 1));
        reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
                       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_config.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        p->phy.ops->autoneg_restart(&p->phy);
        return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        int i;

        if (data == 0)
                data = 2;

        for (i = 0; i < data * 2; i++) {
                t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                                 (i & 1) ? F_GPIO0_OUT_VAL : 0);
                if (msleep_interruptible(500))
                        break;
        }
        t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
                         F_GPIO0_OUT_VAL);
        return 0;
}
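
/*
 * Note: cxgb3_phys_id() above blinks the port LED by toggling GPIO0
 * every 500 ms, so "data" is the requested blink duration in seconds
 * (defaulting to 2); GPIO0 is left asserted on exit.
 */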
1513
1514 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1515 {
1516         struct port_info *p = netdev_priv(dev);
1517
1518         cmd->supported = p->link_config.supported;
1519         cmd->advertising = p->link_config.advertising;
1520
1521         if (netif_carrier_ok(dev)) {
1522                 cmd->speed = p->link_config.speed;
1523                 cmd->duplex = p->link_config.duplex;
1524         } else {
1525                 cmd->speed = -1;
1526                 cmd->duplex = -1;
1527         }
1528
1529         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1530         cmd->phy_address = p->phy.addr;
1531         cmd->transceiver = XCVR_EXTERNAL;
1532         cmd->autoneg = p->link_config.autoneg;
1533         cmd->maxtxpkt = 0;
1534         cmd->maxrxpkt = 0;
1535         return 0;
1536 }
1537
1538 static int speed_duplex_to_caps(int speed, int duplex)
1539 {
1540         int cap = 0;
1541
1542         switch (speed) {
1543         case SPEED_10:
1544                 if (duplex == DUPLEX_FULL)
1545                         cap = SUPPORTED_10baseT_Full;
1546                 else
1547                         cap = SUPPORTED_10baseT_Half;
1548                 break;
1549         case SPEED_100:
1550                 if (duplex == DUPLEX_FULL)
1551                         cap = SUPPORTED_100baseT_Full;
1552                 else
1553                         cap = SUPPORTED_100baseT_Half;
1554                 break;
1555         case SPEED_1000:
1556                 if (duplex == DUPLEX_FULL)
1557                         cap = SUPPORTED_1000baseT_Full;
1558                 else
1559                         cap = SUPPORTED_1000baseT_Half;
1560                 break;
1561         case SPEED_10000:
1562                 if (duplex == DUPLEX_FULL)
1563                         cap = SUPPORTED_10000baseT_Full;
1564         }
1565         return cap;
1566 }
1567
1568 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1569                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1570                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1571                       ADVERTISED_10000baseT_Full)
1572
1573 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1574 {
1575         int cap;
1576         struct port_info *p = netdev_priv(dev);
1577         struct link_config *lc = &p->link_config;
1578
1579         if (!(lc->supported & SUPPORTED_Autoneg)) {
1580                 /*
1581                  * PHY offers a single speed/duplex.  See if that's what's
1582                  * being requested.
1583                  */
1584                 if (cmd->autoneg == AUTONEG_DISABLE) {
1585                         cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1586                         if (lc->supported & cap)
1587                                 return 0;
1588                 }
1589                 return -EINVAL;
1590         }
1591
1592         if (cmd->autoneg == AUTONEG_DISABLE) {
1593                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1594
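                     /* 1000Mb/s cannot be forced: 1000BASE-T requires
                      * autoneg for master/slave resolution */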
1595                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1596                         return -EINVAL;
1597                 lc->requested_speed = cmd->speed;
1598                 lc->requested_duplex = cmd->duplex;
1599                 lc->advertising = 0;
1600         } else {
1601                 cmd->advertising &= ADVERTISED_MASK;
1602                 cmd->advertising &= lc->supported;
1603                 if (!cmd->advertising)
1604                         return -EINVAL;
1605                 lc->requested_speed = SPEED_INVALID;
1606                 lc->requested_duplex = DUPLEX_INVALID;
1607                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1608         }
1609         lc->autoneg = cmd->autoneg;
1610         if (netif_running(dev))
1611                 t3_link_start(&p->phy, &p->mac, lc);
1612         return 0;
1613 }
1614
1615 static void get_pauseparam(struct net_device *dev,
1616                            struct ethtool_pauseparam *epause)
1617 {
1618         struct port_info *p = netdev_priv(dev);
1619
1620         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1621         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1622         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1623 }
1624
1625 static int set_pauseparam(struct net_device *dev,
1626                           struct ethtool_pauseparam *epause)
1627 {
1628         struct port_info *p = netdev_priv(dev);
1629         struct link_config *lc = &p->link_config;
1630
1631         if (epause->autoneg == AUTONEG_DISABLE)
1632                 lc->requested_fc = 0;
1633         else if (lc->supported & SUPPORTED_Autoneg)
1634                 lc->requested_fc = PAUSE_AUTONEG;
1635         else
1636                 return -EINVAL;
1637
1638         if (epause->rx_pause)
1639                 lc->requested_fc |= PAUSE_RX;
1640         if (epause->tx_pause)
1641                 lc->requested_fc |= PAUSE_TX;
1642         if (lc->autoneg == AUTONEG_ENABLE) {
1643                 if (netif_running(dev))
1644                         t3_link_start(&p->phy, &p->mac, lc);
1645         } else {
1646                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1647                 if (netif_running(dev))
1648                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1649         }
1650         return 0;
1651 }
1652
1653 static u32 get_rx_csum(struct net_device *dev)
1654 {
1655         struct port_info *p = netdev_priv(dev);
1656
1657         return p->rx_csum_offload;
1658 }
1659
1660 static int set_rx_csum(struct net_device *dev, u32 data)
1661 {
1662         struct port_info *p = netdev_priv(dev);
1663
1664         p->rx_csum_offload = data;
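             /* LRO depends on Rx checksum offload, so disabling checksums
              * must also disable LRO on all of this port's queue sets */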
1665         if (!data) {
1666                 int i;
1667
1668                 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1669                         set_qset_lro(dev, i, 0);
1670         }
1671         return 0;
1672 }
1673
1674 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1675 {
1676         struct port_info *pi = netdev_priv(dev);
1677         struct adapter *adapter = pi->adapter;
1678         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1679
1680         e->rx_max_pending = MAX_RX_BUFFERS;
1681         e->rx_mini_max_pending = 0;
1682         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1683         e->tx_max_pending = MAX_TXQ_ENTRIES;
1684
1685         e->rx_pending = q->fl_size;
1686         e->rx_mini_pending = q->rspq_size;
1687         e->rx_jumbo_pending = q->jumbo_size;
1688         e->tx_pending = q->txq_size[0];
1689 }
1690
1691 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1692 {
1693         struct port_info *pi = netdev_priv(dev);
1694         struct adapter *adapter = pi->adapter;
1695         struct qset_params *q;
1696         int i;
1697
1698         if (e->rx_pending > MAX_RX_BUFFERS ||
1699             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1700             e->tx_pending > MAX_TXQ_ENTRIES ||
1701             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1702             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1703             e->rx_pending < MIN_FL_ENTRIES ||
1704             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1705             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1706                 return -EINVAL;
1707
1708         if (adapter->flags & FULL_INIT_DONE)
1709                 return -EBUSY;
1710
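             /* apply the new ring sizes to every queue set owned by this port */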
1711         q = &adapter->params.sge.qset[pi->first_qset];
1712         for (i = 0; i < pi->nqsets; ++i, ++q) {
1713                 q->rspq_size = e->rx_mini_pending;
1714                 q->fl_size = e->rx_pending;
1715                 q->jumbo_size = e->rx_jumbo_pending;
1716                 q->txq_size[0] = e->tx_pending;
1717                 q->txq_size[1] = e->tx_pending;
1718                 q->txq_size[2] = e->tx_pending;
1719         }
1720         return 0;
1721 }
1722
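     /*
      * Note that only the first queue set is programmed here; per-queue-set
      * interrupt coalescing is available through the CHELSIO_SET_QSET_PARAMS
      * private ioctl (its intr_lat field).
      */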
1723 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1724 {
1725         struct port_info *pi = netdev_priv(dev);
1726         struct adapter *adapter = pi->adapter;
1727         struct qset_params *qsp = &adapter->params.sge.qset[0];
1728         struct sge_qset *qs = &adapter->sge.qs[0];
1729
1730         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1731                 return -EINVAL;
1732
1733         qsp->coalesce_usecs = c->rx_coalesce_usecs;
1734         t3_update_qset_coalesce(qs, qsp);
1735         return 0;
1736 }
1737
1738 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1739 {
1740         struct port_info *pi = netdev_priv(dev);
1741         struct adapter *adapter = pi->adapter;
1742         struct qset_params *q = adapter->params.sge.qset;
1743
1744         c->rx_coalesce_usecs = q->coalesce_usecs;
1745         return 0;
1746 }
1747
1748 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1749                       u8 *data)
1750 {
1751         struct port_info *pi = netdev_priv(dev);
1752         struct adapter *adapter = pi->adapter;
1753         int i, err = 0;
1754
1755         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1756         if (!buf)
1757                 return -ENOMEM;
1758
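             /*
              * The SEEPROM is read one 32-bit word at a time, so round the
              * start down to a word boundary and copy the requested window
              * out of the bounce buffer afterwards.
              */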
1759         e->magic = EEPROM_MAGIC;
1760         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1761                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
1762
1763         if (!err)
1764                 memcpy(data, buf + e->offset, e->len);
1765         kfree(buf);
1766         return err;
1767 }
1768
1769 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1770                       u8 *data)
1771 {
1772         struct port_info *pi = netdev_priv(dev);
1773         struct adapter *adapter = pi->adapter;
1774         u32 aligned_offset, aligned_len;
1775         __le32 *p;
1776         u8 *buf;
1777         int err;
1778
1779         if (eeprom->magic != EEPROM_MAGIC)
1780                 return -EINVAL;
1781
1782         aligned_offset = eeprom->offset & ~3;
1783         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1784
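             /*
              * Partial-word writes are done read-modify-write: fetch the
              * first and last words of the affected range, merge in the
              * user data, and write whole words back.
              */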
1785         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1786                 buf = kmalloc(aligned_len, GFP_KERNEL);
1787                 if (!buf)
1788                         return -ENOMEM;
1789                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1790                 if (!err && aligned_len > 4)
1791                         err = t3_seeprom_read(adapter,
1792                                               aligned_offset + aligned_len - 4,
1793                                               (__le32 *)&buf[aligned_len - 4]);
1794                 if (err)
1795                         goto out;
1796                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1797         } else
1798                 buf = data;
1799
1800         err = t3_seeprom_wp(adapter, 0);
1801         if (err)
1802                 goto out;
1803
1804         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1805                 err = t3_seeprom_write(adapter, aligned_offset, *p);
1806                 aligned_offset += 4;
1807         }
1808
1809         if (!err)
1810                 err = t3_seeprom_wp(adapter, 1);
1811 out:
1812         if (buf != data)
1813                 kfree(buf);
1814         return err;
1815 }
1816
1817 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1818 {
1819         wol->supported = 0;
1820         wol->wolopts = 0;
1821         memset(&wol->sopass, 0, sizeof(wol->sopass));
1822 }
1823
1824 static int cxgb3_set_flags(struct net_device *dev, u32 data)
1825 {
1826         struct port_info *pi = netdev_priv(dev);
1827         int i;
1828
1829         if (data & ETH_FLAG_LRO) {
1830                 if (!pi->rx_csum_offload)
1831                         return -EINVAL;
1832
1833                 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1834                         set_qset_lro(dev, i, 1);
1835
1836         } else
1837                 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1838                         set_qset_lro(dev, i, 0);
1839
1840         return 0;
1841 }
1842
1843 static const struct ethtool_ops cxgb_ethtool_ops = {
1844         .get_settings = get_settings,
1845         .set_settings = set_settings,
1846         .get_drvinfo = get_drvinfo,
1847         .get_msglevel = get_msglevel,
1848         .set_msglevel = set_msglevel,
1849         .get_ringparam = get_sge_param,
1850         .set_ringparam = set_sge_param,
1851         .get_coalesce = get_coalesce,
1852         .set_coalesce = set_coalesce,
1853         .get_eeprom_len = get_eeprom_len,
1854         .get_eeprom = get_eeprom,
1855         .set_eeprom = set_eeprom,
1856         .get_pauseparam = get_pauseparam,
1857         .set_pauseparam = set_pauseparam,
1858         .get_rx_csum = get_rx_csum,
1859         .set_rx_csum = set_rx_csum,
1860         .set_tx_csum = ethtool_op_set_tx_csum,
1861         .set_sg = ethtool_op_set_sg,
1862         .get_link = ethtool_op_get_link,
1863         .get_strings = get_strings,
1864         .phys_id = cxgb3_phys_id,
1865         .nway_reset = restart_autoneg,
1866         .get_sset_count = get_sset_count,
1867         .get_ethtool_stats = get_stats,
1868         .get_regs_len = get_regs_len,
1869         .get_regs = get_regs,
1870         .get_wol = get_wol,
1871         .set_tso = ethtool_op_set_tso,
1872         .get_flags = ethtool_op_get_flags,
1873         .set_flags = cxgb3_set_flags,
1874 };
1875
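     /*
      * Range check for the extension ioctl below: a negative value means
      * "parameter not supplied" and always passes.
      */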
1876 static int in_range(int val, int lo, int hi)
1877 {
1878         return val < 0 || (val <= hi && val >= lo);
1879 }
1880
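     /*
      * SIOCCHIOCTL dispatcher.  The first 32-bit word of the user buffer
      * selects the sub-command; each case then copies in its full argument
      * structure, whose leading field is that same command word.
      */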
1881 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1882 {
1883         struct port_info *pi = netdev_priv(dev);
1884         struct adapter *adapter = pi->adapter;
1885         u32 cmd;
1886         int ret;
1887
1888         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1889                 return -EFAULT;
1890
1891         switch (cmd) {
1892         case CHELSIO_SET_QSET_PARAMS:{
1893                 int i;
1894                 struct qset_params *q;
1895                 struct ch_qset_params t;
1896                 int q1 = pi->first_qset;
1897                 int nqsets = pi->nqsets;
1898
1899                 if (!capable(CAP_NET_ADMIN))
1900                         return -EPERM;
1901                 if (copy_from_user(&t, useraddr, sizeof(t)))
1902                         return -EFAULT;
1903                 if (t.qset_idx >= SGE_QSETS)
1904                         return -EINVAL;
1905                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1906                     !in_range(t.cong_thres, 0, 255) ||
1907                     !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1908                               MAX_TXQ_ENTRIES) ||
1909                     !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1910                               MAX_TXQ_ENTRIES) ||
1911                     !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1912                               MAX_CTRL_TXQ_ENTRIES) ||
1913                     !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1914                               MAX_RX_BUFFERS) ||
1915                     !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1916                               MAX_RX_JUMBO_BUFFERS) ||
1917                     !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1918                               MAX_RSPQ_ENTRIES))
1919                         return -EINVAL;
1920
1921                 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1922                         for_each_port(adapter, i) {
1923                                 pi = adap2pinfo(adapter, i);
1924                                 if (t.qset_idx >= pi->first_qset &&
1925                                     t.qset_idx < pi->first_qset + pi->nqsets &&
1926                                     !pi->rx_csum_offload)
1927                                         return -EINVAL;
1928                         }
1929
1930                 if ((adapter->flags & FULL_INIT_DONE) &&
1931                     (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1932                      t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1933                      t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1934                      t.polling >= 0 || t.cong_thres >= 0))
1935                         return -EBUSY;
1936
1937                 /* Allow setting of any available qset when offload enabled */
1938                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1939                         q1 = 0;
1940                         for_each_port(adapter, i) {
1941                                 pi = adap2pinfo(adapter, i);
1942                                 nqsets += pi->first_qset + pi->nqsets;
1943                         }
1944                 }
1945
1946                 if (t.qset_idx < q1)
1947                         return -EINVAL;
1948                 if (t.qset_idx > q1 + nqsets - 1)
1949                         return -EINVAL;
1950
1951                 q = &adapter->params.sge.qset[t.qset_idx];
1952
1953                 if (t.rspq_size >= 0)
1954                         q->rspq_size = t.rspq_size;
1955                 if (t.fl_size[0] >= 0)
1956                         q->fl_size = t.fl_size[0];
1957                 if (t.fl_size[1] >= 0)
1958                         q->jumbo_size = t.fl_size[1];
1959                 if (t.txq_size[0] >= 0)
1960                         q->txq_size[0] = t.txq_size[0];
1961                 if (t.txq_size[1] >= 0)
1962                         q->txq_size[1] = t.txq_size[1];
1963                 if (t.txq_size[2] >= 0)
1964                         q->txq_size[2] = t.txq_size[2];
1965                 if (t.cong_thres >= 0)
1966                         q->cong_thres = t.cong_thres;
1967                 if (t.intr_lat >= 0) {
1968                         struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];
1970
1971                         q->coalesce_usecs = t.intr_lat;
1972                         t3_update_qset_coalesce(qs, q);
1973                 }
1974                 if (t.polling >= 0) {
1975                         if (adapter->flags & USING_MSIX)
1976                                 q->polling = t.polling;
1977                         else {
1978                                 /* No polling with INTx for T3A */
1979                                 if (adapter->params.rev == 0 &&
1980                                         !(adapter->flags & USING_MSI))
1981                                         t.polling = 0;
1982
1983                                 for (i = 0; i < SGE_QSETS; i++) {
1984                                         q = &adapter->params.sge.qset[i];
1986                                         q->polling = t.polling;
1987                                 }
1988                         }
1989                 }
1990                 if (t.lro >= 0)
1991                         set_qset_lro(dev, t.qset_idx, t.lro);
1992
1993                 break;
1994         }
1995         case CHELSIO_GET_QSET_PARAMS:{
1996                 struct qset_params *q;
1997                 struct ch_qset_params t;
1998                 int q1 = pi->first_qset;
1999                 int nqsets = pi->nqsets;
2000                 int i;
2001
2002                 if (copy_from_user(&t, useraddr, sizeof(t)))
2003                         return -EFAULT;
2004
2005                 /* Display qsets for all ports when offload enabled */
2006                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2007                         q1 = 0;
2008                         for_each_port(adapter, i) {
2009                                 pi = adap2pinfo(adapter, i);
2010                                 nqsets = pi->first_qset + pi->nqsets;
2011                         }
2012                 }
2013
2014                 if (t.qset_idx >= nqsets)
2015                         return -EINVAL;
2016
2017                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2018                 t.rspq_size = q->rspq_size;
2019                 t.txq_size[0] = q->txq_size[0];
2020                 t.txq_size[1] = q->txq_size[1];
2021                 t.txq_size[2] = q->txq_size[2];
2022                 t.fl_size[0] = q->fl_size;
2023                 t.fl_size[1] = q->jumbo_size;
2024                 t.polling = q->polling;
2025                 t.lro = q->lro;
2026                 t.intr_lat = q->coalesce_usecs;
2027                 t.cong_thres = q->cong_thres;
2028                 t.qnum = q1;
2029
2030                 if (adapter->flags & USING_MSIX)
2031                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2032                 else
2033                         t.vector = adapter->pdev->irq;
2034
2035                 if (copy_to_user(useraddr, &t, sizeof(t)))
2036                         return -EFAULT;
2037                 break;
2038         }
2039         case CHELSIO_SET_QSET_NUM:{
2040                 struct ch_reg edata;
2041                 unsigned int i, first_qset = 0, other_qsets = 0;
2042
2043                 if (!capable(CAP_NET_ADMIN))
2044                         return -EPERM;
2045                 if (adapter->flags & FULL_INIT_DONE)
2046                         return -EBUSY;
2047                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2048                         return -EFAULT;
2049                 if (edata.val < 1 ||
2050                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2051                         return -EINVAL;
2052
2053                 for_each_port(adapter, i)
2054                         if (adapter->port[i] && adapter->port[i] != dev)
2055                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2056
2057                 if (edata.val + other_qsets > SGE_QSETS)
2058                         return -EINVAL;
2059
2060                 pi->nqsets = edata.val;
2061
2062                 for_each_port(adapter, i)
2063                         if (adapter->port[i]) {
2064                                 pi = adap2pinfo(adapter, i);
2065                                 pi->first_qset = first_qset;
2066                                 first_qset += pi->nqsets;
2067                         }
2068                 break;
2069         }
2070         case CHELSIO_GET_QSET_NUM:{
2071                 struct ch_reg edata;
2072
2073                 edata.cmd = CHELSIO_GET_QSET_NUM;
2074                 edata.val = pi->nqsets;
2075                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2076                         return -EFAULT;
2077                 break;
2078         }
2079         case CHELSIO_LOAD_FW:{
2080                 u8 *fw_data;
2081                 struct ch_mem_range t;
2082
2083                 if (!capable(CAP_SYS_RAWIO))
2084                         return -EPERM;
2085                 if (copy_from_user(&t, useraddr, sizeof(t)))
2086                         return -EFAULT;
2087                 /* XXX t.len is user-supplied and unchecked; we rely on
                      * kmalloc() to reject absurd sizes */
2088                 fw_data = kmalloc(t.len, GFP_KERNEL);
2089                 if (!fw_data)
2090                         return -ENOMEM;
2091
2092                 if (copy_from_user(fw_data, useraddr + sizeof(t), t.len)) {
2094                         kfree(fw_data);
2095                         return -EFAULT;
2096                 }
2097
2098                 ret = t3_load_fw(adapter, fw_data, t.len);
2099                 kfree(fw_data);
2100                 if (ret)
2101                         return ret;
2102                 break;
2103         }
2104         case CHELSIO_SETMTUTAB:{
2105                 struct ch_mtus m;
2106                 int i;
2107
2108                 if (!is_offload(adapter))
2109                         return -EOPNOTSUPP;
2110                 if (!capable(CAP_NET_ADMIN))
2111                         return -EPERM;
2112                 if (offload_running(adapter))
2113                         return -EBUSY;
2114                 if (copy_from_user(&m, useraddr, sizeof(m)))
2115                         return -EFAULT;
2116                 if (m.nmtus != NMTUS)
2117                         return -EINVAL;
2118                 if (m.mtus[0] < 81)     /* accommodate SACK */
2119                         return -EINVAL;
2120
2121                 /* MTUs must be in ascending order */
2122                 for (i = 1; i < NMTUS; ++i)
2123                         if (m.mtus[i] < m.mtus[i - 1])
2124                                 return -EINVAL;
2125
2126                 memcpy(adapter->params.mtus, m.mtus,
2127                         sizeof(adapter->params.mtus));
2128                 break;
2129         }
2130         case CHELSIO_GET_PM:{
2131                 struct tp_params *p = &adapter->params.tp;
2132                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2133
2134                 if (!is_offload(adapter))
2135                         return -EOPNOTSUPP;
2136                 m.tx_pg_sz = p->tx_pg_size;
2137                 m.tx_num_pg = p->tx_num_pgs;
2138                 m.rx_pg_sz = p->rx_pg_size;
2139                 m.rx_num_pg = p->rx_num_pgs;
2140                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2141                 if (copy_to_user(useraddr, &m, sizeof(m)))
2142                         return -EFAULT;
2143                 break;
2144         }
2145         case CHELSIO_SET_PM:{
2146                 struct ch_pm m;
2147                 struct tp_params *p = &adapter->params.tp;
2148
2149                 if (!is_offload(adapter))
2150                         return -EOPNOTSUPP;
2151                 if (!capable(CAP_NET_ADMIN))
2152                         return -EPERM;
2153                 if (adapter->flags & FULL_INIT_DONE)
2154                         return -EBUSY;
2155                 if (copy_from_user(&m, useraddr, sizeof(m)))
2156                         return -EFAULT;
2157                 if (!is_power_of_2(m.rx_pg_sz) ||
2158                         !is_power_of_2(m.tx_pg_sz))
2159                         return -EINVAL; /* not power of 2 */
2160                 if (!(m.rx_pg_sz & 0x14000))
2161                         return -EINVAL; /* not 16KB or 64KB */
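                     /* tx page size must be 16KB..16MB and a power of 4 */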
2162                 if (!(m.tx_pg_sz & 0x1554000))
2163                         return -EINVAL;
2164                 if (m.tx_num_pg == -1)
2165                         m.tx_num_pg = p->tx_num_pgs;
2166                 if (m.rx_num_pg == -1)
2167                         m.rx_num_pg = p->rx_num_pgs;
2168                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2169                         return -EINVAL;
2170                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2171                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2172                         return -EINVAL;
2173                 p->rx_pg_size = m.rx_pg_sz;
2174                 p->tx_pg_size = m.tx_pg_sz;
2175                 p->rx_num_pgs = m.rx_num_pg;
2176                 p->tx_num_pgs = m.tx_num_pg;
2177                 break;
2178         }
2179         case CHELSIO_GET_MEM:{
2180                 struct ch_mem_range t;
2181                 struct mc7 *mem;
2182                 u64 buf[32];
2183
2184                 if (!is_offload(adapter))
2185                         return -EOPNOTSUPP;
2186                 if (!(adapter->flags & FULL_INIT_DONE))
2187                         return -EIO;    /* need the memory controllers */
2188                 if (copy_from_user(&t, useraddr, sizeof(t)))
2189                         return -EFAULT;
2190                 if ((t.addr & 7) || (t.len & 7))
2191                         return -EINVAL;
2192                 if (t.mem_id == MEM_CM)
2193                         mem = &adapter->cm;
2194                 else if (t.mem_id == MEM_PMRX)
2195                         mem = &adapter->pmrx;
2196                 else if (t.mem_id == MEM_PMTX)
2197                         mem = &adapter->pmtx;
2198                 else
2199                         return -EINVAL;
2200
2201                 /*
2202                  * Version scheme:
2203                  * bits 0..9: chip version
2204                  * bits 10..15: chip revision
2205                  */
2206                 t.version = 3 | (adapter->params.rev << 10);
2207                 if (copy_to_user(useraddr, &t, sizeof(t)))
2208                         return -EFAULT;
2209
2210                 /*
2211                  * Read 256 bytes at a time as len can be large and we don't
2212                  * want to use huge intermediate buffers.
2213                  */
2214                 useraddr += sizeof(t);  /* advance to start of buffer */
2215                 while (t.len) {
2216                         unsigned int chunk =
2217                                 min_t(unsigned int, t.len, sizeof(buf));
2218
2219                         ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
2222                         if (ret)
2223                                 return ret;
2224                         if (copy_to_user(useraddr, buf, chunk))
2225                                 return -EFAULT;
2226                         useraddr += chunk;
2227                         t.addr += chunk;
2228                         t.len -= chunk;
2229                 }
2230                 break;
2231         }
2232         case CHELSIO_SET_TRACE_FILTER:{
2233                 struct ch_trace t;
2234                 const struct trace_params *tp;
2235
2236                 if (!capable(CAP_NET_ADMIN))
2237                         return -EPERM;
2238                 if (!offload_running(adapter))
2239                         return -EAGAIN;
2240                 if (copy_from_user(&t, useraddr, sizeof(t)))
2241                         return -EFAULT;
2242
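                     /* ch_trace mirrors struct trace_params starting at its
                      * sip field, so we can point straight into it */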
2243                 tp = (const struct trace_params *)&t.sip;
2244                 if (t.config_tx)
2245                         t3_config_trace_filter(adapter, tp, 0,
2246                                                 t.invert_match,
2247                                                 t.trace_tx);
2248                 if (t.config_rx)
2249                         t3_config_trace_filter(adapter, tp, 1,
2250                                                 t.invert_match,
2251                                                 t.trace_rx);
2252                 break;
2253         }
2254         default:
2255                 return -EOPNOTSUPP;
2256         }
2257         return 0;
2258 }
2259
2260 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2261 {
2262         struct mii_ioctl_data *data = if_mii(req);
2263         struct port_info *pi = netdev_priv(dev);
2264         struct adapter *adapter = pi->adapter;
2265         int ret, mmd;
2266
2267         switch (cmd) {
2268         case SIOCGMIIPHY:
2269                 data->phy_id = pi->phy.addr;
2270                 /* FALLTHRU */
2271         case SIOCGMIIREG:{
2272                 u32 val;
2273                 struct cphy *phy = &pi->phy;
2274
2275                 if (!phy->mdio_read)
2276                         return -EOPNOTSUPP;
2277                 if (is_10G(adapter)) {
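                             /* clause-45 PHY: the MMD is carried in bits 8+
                              * of phy_id; default to the PCS device */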
2278                         mmd = data->phy_id >> 8;
2279                         if (!mmd)
2280                                 mmd = MDIO_DEV_PCS;
2281                         else if (mmd > MDIO_DEV_VEND2)
2282                                 return -EINVAL;
2283
2284                         ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
2285                                              mmd, data->reg_num, &val);
2286                 } else
2287                         ret = phy->mdio_read(adapter, data->phy_id & 0x1f,
2288                                              0, data->reg_num & 0x1f, &val);
2292                 if (!ret)
2293                         data->val_out = val;
2294                 break;
2295         }
2296         case SIOCSMIIREG:{
2297                 struct cphy *phy = &pi->phy;
2298
2299                 if (!capable(CAP_NET_ADMIN))
2300                         return -EPERM;
2301                 if (!phy->mdio_write)
2302                         return -EOPNOTSUPP;
2303                 if (is_10G(adapter)) {
2304                         mmd = data->phy_id >> 8;
2305                         if (!mmd)
2306                                 mmd = MDIO_DEV_PCS;
2307                         else if (mmd > MDIO_DEV_VEND2)
2308                                 return -EINVAL;
2309
2310                         ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
2311                                               mmd, data->reg_num,
2312                                               data->val_in);
2313                 } else
2314                         ret = phy->mdio_write(adapter, data->phy_id & 0x1f,
2315                                               0, data->reg_num & 0x1f,
2316                                               data->val_in);
2321                 break;
2322         }
2323         case SIOCCHIOCTL:
2324                 return cxgb_extension_ioctl(dev, req->ifr_data);
2325         default:
2326                 return -EOPNOTSUPP;
2327         }
2328         return ret;
2329 }
2330
2331 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2332 {
2333         struct port_info *pi = netdev_priv(dev);
2334         struct adapter *adapter = pi->adapter;
2335         int ret;
2336
2337         if (new_mtu < 81)       /* accommodate SACK */
2338                 return -EINVAL;
2339         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2340                 return ret;
2341         dev->mtu = new_mtu;
2342         init_port_mtus(adapter);
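             /*
              * Rev-0 (T3A) hardware apparently caps the offload MTU table at
              * the port MTU, so the table must be reloaded after a change.
              */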
2343         if (adapter->params.rev == 0 && offload_running(adapter))
2344                 t3_load_mtus(adapter, adapter->params.mtus,
2345                              adapter->params.a_wnd, adapter->params.b_wnd,
2346                              adapter->port[0]->mtu);
2347         return 0;
2348 }
2349
2350 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2351 {
2352         struct port_info *pi = netdev_priv(dev);
2353         struct adapter *adapter = pi->adapter;
2354         struct sockaddr *addr = p;
2355
2356         if (!is_valid_ether_addr(addr->sa_data))
2357                 return -EINVAL;
2358
2359         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2360         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2361         if (offload_running(adapter))
2362                 write_smt_entry(adapter, pi->port_id);
2363         return 0;
2364 }
2365
2366 /**
2367  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2368  * @adap: the adapter
2369  * @p: the port
2370  *
2371  * Ensures that current Rx processing on any of the queues associated with
2372  * the given port completes before returning.  We do this by acquiring and
2373  * releasing the locks of the response queues associated with the port.
2374  */
2375 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2376 {
2377         int i;
2378
2379         for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2380                 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2381
2382                 spin_lock_irq(&q->lock);
2383                 spin_unlock_irq(&q->lock);
2384         }
2385 }
2386
2387 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2388 {
2389         struct port_info *pi = netdev_priv(dev);
2390         struct adapter *adapter = pi->adapter;
2391
2392         pi->vlan_grp = grp;
2393         if (adapter->params.rev > 0)
2394                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2395         else {
2396                 /* single control for all ports */
2397                 unsigned int i, have_vlans = 0;
2398                 for_each_port(adapter, i)
2399                     have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2400
2401                 t3_set_vlan_accel(adapter, 1, have_vlans);
2402         }
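             /* let in-progress Rx drain so no queue still sees the old vlan_grp */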
2403         t3_synchronize_rx(adapter, pi);
2404 }
2405
2406 #ifdef CONFIG_NET_POLL_CONTROLLER
2407 static void cxgb_netpoll(struct net_device *dev)
2408 {
2409         struct port_info *pi = netdev_priv(dev);
2410         struct adapter *adapter = pi->adapter;
2411         int qidx;
2412
2413         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2414                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2415                 void *source;
2416
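                     /* MSI-X handlers take the queue set as their dev_id;
                      * the INTx/MSI handler takes the adapter */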
2417                 if (adapter->flags & USING_MSIX)
2418                         source = qs;
2419                 else
2420                         source = adapter;
2421
2422                 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2423         }
2424 }
2425 #endif
2426
2427 /*
2428  * Periodic accumulation of MAC statistics.
2429  */
2430 static void mac_stats_update(struct adapter *adapter)
2431 {
2432         int i;
2433
2434         for_each_port(adapter, i) {
2435                 struct net_device *dev = adapter->port[i];
2436                 struct port_info *p = netdev_priv(dev);
2437
2438                 if (netif_running(dev)) {
2439                         spin_lock(&adapter->stats_lock);
2440                         t3_mac_update_stats(&p->mac);
2441                         spin_unlock(&adapter->stats_lock);
2442                 }
2443         }
2444 }
2445
2446 static void check_link_status(struct adapter *adapter)
2447 {
2448         int i;
2449
2450         for_each_port(adapter, i) {
2451                 struct net_device *dev = adapter->port[i];
2452                 struct port_info *p = netdev_priv(dev);
2453
2454                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
2455                         t3_link_changed(adapter, i);
2456         }
2457 }
2458
2459 static void check_t3b2_mac(struct adapter *adapter)
2460 {
2461         int i;
2462
2463         if (!rtnl_trylock())    /* synchronize with ifdown */
2464                 return;
2465
2466         for_each_port(adapter, i) {
2467                 struct net_device *dev = adapter->port[i];
2468                 struct port_info *p = netdev_priv(dev);
2469                 int status;
2470
2471                 if (!netif_running(dev))
2472                         continue;
2473
2474                 status = 0;
2475                 if (netif_carrier_ok(dev))
2476                         status = t3b2_mac_watchdog_task(&p->mac);
2477                 if (status == 1)
2478                         p->mac.stats.num_toggled++;
2479                 else if (status == 2) {
2480                         struct cmac *mac = &p->mac;
2481
2482                         t3_mac_set_mtu(mac, dev->mtu);
2483                         t3_mac_set_address(mac, 0, dev->dev_addr);
2484                         cxgb_set_rxmode(dev);
2485                         t3_link_start(&p->phy, mac, &p->link_config);
2486                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2487                         t3_port_intr_enable(adapter, p->port_id);
2488                         p->mac.stats.num_resets++;
2489                 }
2490         }
2491         rtnl_unlock();
2492 }
2493
2495 static void t3_adap_check_task(struct work_struct *work)
2496 {
2497         struct adapter *adapter = container_of(work, struct adapter,
2498                                                adap_check_task.work);
2499         const struct adapter_params *p = &adapter->params;
2500
2501         adapter->check_task_cnt++;
2502
2503         /* Check link status for PHYs without interrupts */
2504         if (p->linkpoll_period)
2505                 check_link_status(adapter);
2506
2507         /* Accumulate MAC stats if needed */
2508         if (!p->linkpoll_period ||
2509             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2510             p->stats_update_period) {
2511                 mac_stats_update(adapter);
2512                 adapter->check_task_cnt = 0;
2513         }
2514
2515         if (p->rev == T3_REV_B2)
2516                 check_t3b2_mac(adapter);
2517
2518         /* Schedule the next check update if any port is active. */
2519         spin_lock_irq(&adapter->work_lock);
2520         if (adapter->open_device_map & PORT_MASK)
2521                 schedule_chk_task(adapter);
2522         spin_unlock_irq(&adapter->work_lock);
2523 }
2524
2525 /*
2526  * Processes external (PHY) interrupts in process context.
2527  */
2528 static void ext_intr_task(struct work_struct *work)
2529 {
2530         struct adapter *adapter = container_of(work, struct adapter,
2531                                                ext_intr_handler_task);
2532
2533         t3_phy_intr_handler(adapter);
2534
2535         /* Now reenable external interrupts */
2536         spin_lock_irq(&adapter->work_lock);
2537         if (adapter->slow_intr_mask) {
2538                 adapter->slow_intr_mask |= F_T3DBG;
2539                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2540                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2541                              adapter->slow_intr_mask);
2542         }
2543         spin_unlock_irq(&adapter->work_lock);
2544 }
2545
2546 /*
2547  * Interrupt-context handler for external (PHY) interrupts.
2548  */
2549 void t3_os_ext_intr_handler(struct adapter *adapter)
2550 {
2551         /*
2552          * Schedule a task to handle external interrupts as they may be slow
2553          * and we use a mutex to protect MDIO registers.  We disable PHY
2554          * interrupts in the meantime and let the task reenable them when
2555          * it's done.
2556          */
2557         spin_lock(&adapter->work_lock);
2558         if (adapter->slow_intr_mask) {
2559                 adapter->slow_intr_mask &= ~F_T3DBG;
2560                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2561                              adapter->slow_intr_mask);
2562                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2563         }
2564         spin_unlock(&adapter->work_lock);
2565 }
2566
2567 static int t3_adapter_error(struct adapter *adapter, int reset)
2568 {
2569         int i, ret = 0;
2570
2571         /* Stop all ports */
2572         for_each_port(adapter, i) {
2573                 struct net_device *netdev = adapter->port[i];
2574
2575                 if (netif_running(netdev))
2576                         cxgb_close(netdev);
2577         }
2578
2579         if (is_offload(adapter) &&
2580             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
2581                 offload_close(&adapter->tdev);
2582
2583         /* Stop SGE timers */
2584         t3_stop_sge_timers(adapter);
2585
2586         adapter->flags &= ~FULL_INIT_DONE;
2587
2588         if (reset)
2589                 ret = t3_reset_adapter(adapter);
2590
2591         pci_disable_device(adapter->pdev);
2592
2593         return ret;
2594 }
2595
2596 static int t3_reenable_adapter(struct adapter *adapter)
2597 {
2598         if (pci_enable_device(adapter->pdev)) {
2599                 dev_err(&adapter->pdev->dev,
2600                         "Cannot re-enable PCI device after reset.\n");
2601                 goto err;
2602         }
2603         pci_set_master(adapter->pdev);
2604         pci_restore_state(adapter->pdev);
2605
2606         /* Free sge resources */
2607         t3_free_sge_resources(adapter);
2608
2609         if (t3_replay_prep_adapter(adapter))
2610                 goto err;
2611
2612         return 0;
2613 err:
2614         return -1;
2615 }
2616
2617 static void t3_resume_ports(struct adapter *adapter)
2618 {
2619         int i;
2620
2621         /* Restart the ports */
2622         for_each_port(adapter, i) {
2623                 struct net_device *netdev = adapter->port[i];
2624
2625                 if (netif_running(netdev)) {
2626                         if (cxgb_open(netdev)) {
2627                                 dev_err(&adapter->pdev->dev,
2628                                         "can't bring device back up after reset\n");
2630                                 continue;
2631                         }
2632                 }
2633         }
2634 }
2635
2636 /*
2637  * Process a fatal error: bring the ports down, reset the chip, then
2638  * bring the ports back up.
2639  */
2640 static void fatal_error_task(struct work_struct *work)
2641 {
2642         struct adapter *adapter = container_of(work, struct adapter,
2643                                                fatal_error_handler_task);
2644         int err = 0;
2645
2646         rtnl_lock();
2647         err = t3_adapter_error(adapter, 1);
2648         if (!err)
2649                 err = t3_reenable_adapter(adapter);
2650         if (!err)
2651                 t3_resume_ports(adapter);
2652
2653         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2654         rtnl_unlock();
2655 }
2656
2657 void t3_fatal_err(struct adapter *adapter)
2658 {
2659         unsigned int fw_status[4];
2660
2661         if (adapter->flags & FULL_INIT_DONE) {
2662                 t3_sge_stop(adapter);
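                     /* disable Tx and Rx on both XGMAC instances */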
2663                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2664                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2665                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2666                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2667
2668                 spin_lock(&adapter->work_lock);
2669                 t3_intr_disable(adapter);
2670                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2671                 spin_unlock(&adapter->work_lock);
2672         }
2673         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2674         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2675                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2676                          fw_status[0], fw_status[1],
2677                          fw_status[2], fw_status[3]);
2679 }
2680
2681 /**
2682  * t3_io_error_detected - called when PCI error is detected
2683  * @pdev: Pointer to PCI device
2684  * @state: The current pci connection state
2685  *
2686  * This function is called after a PCI bus error affecting
2687  * this device has been detected.
2688  */
2689 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2690                                              pci_channel_state_t state)
2691 {
2692         struct adapter *adapter = pci_get_drvdata(pdev);
2693
2694         t3_adapter_error(adapter, 0);
2695
2696         /* Request a slot reset. */
2697         return PCI_ERS_RESULT_NEED_RESET;
2699 }
2700
2701 /**
2702  * t3_io_slot_reset - called after the pci bus has been reset.
2703  * @pdev: Pointer to PCI device
2704  *
2705  * Restart the card from scratch, as if from a cold-boot.
2706  */
2707 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2708 {
2709         struct adapter *adapter = pci_get_drvdata(pdev);
2710
2711         if (!t3_reenable_adapter(adapter))
2712                 return PCI_ERS_RESULT_RECOVERED;
2713
2714         return PCI_ERS_RESULT_DISCONNECT;
2715 }
2716
2717 /**
2718  * t3_io_resume - called when traffic can start flowing again.
2719  * @pdev: Pointer to PCI device
2720  *
2721  * This callback is called when the error recovery driver tells us that
2722  * it's OK to resume normal operation.
2723  */
2724 static void t3_io_resume(struct pci_dev *pdev)
2725 {
2726         struct adapter *adapter = pci_get_drvdata(pdev);
2727
2728         t3_resume_ports(adapter);
2729 }
2730
2731 static struct pci_error_handlers t3_err_handler = {
2732         .error_detected = t3_io_error_detected,
2733         .slot_reset = t3_io_slot_reset,
2734         .resume = t3_io_resume,
2735 };
2736
2737 /*
2738  * Set the number of qsets based on the number of CPUs and the number of ports,
2739  * not to exceed the number of available qsets, assuming there are enough qsets
2740  * per port in HW.
2741  */
2742 static void set_nqsets(struct adapter *adap)
2743 {
2744         int i, j = 0;
2745         int num_cpus = num_online_cpus();
2746         int hwports = adap->params.nports;
2747         int nqsets = SGE_QSETS;
2748
2749         if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
2750                 if (hwports == 2 &&
2751                     (hwports * nqsets > SGE_QSETS ||
2752                      num_cpus >= nqsets / hwports))
2753                         nqsets /= hwports;
2754                 if (nqsets > num_cpus)
2755                         nqsets = num_cpus;
2756                 if (nqsets < 1 || hwports == 4)
2757                         nqsets = 1;
2758         } else
2759                 nqsets = 1;
2760
2761         for_each_port(adap, i) {
2762                 struct port_info *pi = adap2pinfo(adap, i);
2763
2764                 pi->first_qset = j;
2765                 pi->nqsets = nqsets;
2766                 j = pi->first_qset + nqsets;
2767
2768                 dev_info(&adap->pdev->dev,
2769                          "Port %d using %d queue sets.\n", i, nqsets);
2770         }
2771 }
2772
2773 static int __devinit cxgb_enable_msix(struct adapter *adap)
2774 {
2775         struct msix_entry entries[SGE_QSETS + 1];
2776         int i, err;
2777
2778         for (i = 0; i < ARRAY_SIZE(entries); ++i)
2779                 entries[i].entry = i;
2780
2781         err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2782         if (!err) {
2783                 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2784                         adap->msix_info[i].vec = entries[i].vector;
2785         } else if (err > 0)
2786                 dev_info(&adap->pdev->dev,
2787                        "only %d MSI-X vectors left, not using MSI-X\n", err);
2788         return err;
2789 }
2790
2791 static void __devinit print_port_info(struct adapter *adap,
2792                                       const struct adapter_info *ai)
2793 {
2794         static const char *pci_variant[] = {
2795                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2796         };
2797
2798         int i;
2799         char buf[80];
2800
2801         if (is_pcie(adap))
2802                 snprintf(buf, sizeof(buf), "%s x%d",
2803                          pci_variant[adap->params.pci.variant],
2804                          adap->params.pci.width);
2805         else
2806                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2807                          pci_variant[adap->params.pci.variant],
2808                          adap->params.pci.speed, adap->params.pci.width);
2809
2810         for_each_port(adap, i) {
2811                 struct net_device *dev = adap->port[i];
2812                 const struct port_info *pi = netdev_priv(dev);
2813
2814                 if (!test_bit(i, &adap->registered_device_map))
2815                         continue;
2816                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2817                        dev->name, ai->desc, pi->phy.desc,
2818                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
2819                        (adap->flags & USING_MSIX) ? " MSI-X" :
2820                        (adap->flags & USING_MSI) ? " MSI" : "");
2821                 if (adap->name == dev->name && adap->params.vpd.mclk)
2822                         printk(KERN_INFO
2823                                "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2824                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2825                                t3_mc7_size(&adap->pmtx) >> 20,
2826                                t3_mc7_size(&adap->pmrx) >> 20,
2827                                adap->params.vpd.sn);
2828         }
2829 }
2830
2831 static const struct net_device_ops cxgb_netdev_ops = {
2832         .ndo_open               = cxgb_open,
2833         .ndo_stop               = cxgb_close,
2834         .ndo_start_xmit         = t3_eth_xmit,
2835         .ndo_get_stats          = cxgb_get_stats,
2836         .ndo_validate_addr      = eth_validate_addr,
2837         .ndo_set_multicast_list = cxgb_set_rxmode,
2838         .ndo_do_ioctl           = cxgb_ioctl,
2839         .ndo_change_mtu         = cxgb_change_mtu,
2840         .ndo_set_mac_address    = cxgb_set_mac_addr,
2841         .ndo_vlan_rx_register   = vlan_rx_register,
2842 #ifdef CONFIG_NET_POLL_CONTROLLER
2843         .ndo_poll_controller    = cxgb_netpoll,
2844 #endif
2845 };
2846
2847 static int __devinit init_one(struct pci_dev *pdev,
2848                               const struct pci_device_id *ent)
2849 {
2850         static int version_printed;
2851
2852         int i, err, pci_using_dac = 0;
2853         unsigned long mmio_start, mmio_len;
2854         const struct adapter_info *ai;
2855         struct adapter *adapter = NULL;
2856         struct port_info *pi;
2857
2858         if (!version_printed) {
2859                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2860                 ++version_printed;
2861         }
2862
2863         if (!cxgb3_wq) {
2864                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2865                 if (!cxgb3_wq) {
2866                         printk(KERN_ERR DRV_NAME
2867                                ": cannot initialize work queue\n");
2868                         return -ENOMEM;
2869                 }
2870         }
2871
2872         err = pci_request_regions(pdev, DRV_NAME);
2873         if (err) {
2874                 /* Just info, some other driver may have claimed the device. */
2875                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2876                 return err;
2877         }
2878
2879         err = pci_enable_device(pdev);
2880         if (err) {
2881                 dev_err(&pdev->dev, "cannot enable PCI device\n");
2882                 goto out_release_regions;
2883         }
2884
2885         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2886                 pci_using_dac = 1;
2887                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2888                 if (err) {
2889                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2890                                "coherent allocations\n");
2891                         goto out_disable_device;
2892                 }
2893         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2894                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2895                 goto out_disable_device;
2896         }
2897
2898         pci_set_master(pdev);
2899         pci_save_state(pdev);
2900
2901         mmio_start = pci_resource_start(pdev, 0);
2902         mmio_len = pci_resource_len(pdev, 0);
2903         ai = t3_get_adapter_info(ent->driver_data);
2904
2905         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2906         if (!adapter) {
2907                 err = -ENOMEM;
2908                 goto out_disable_device;
2909         }
2910
2911         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2912         if (!adapter->regs) {
2913                 dev_err(&pdev->dev, "cannot map device registers\n");
2914                 err = -ENOMEM;
2915                 goto out_free_adapter;
2916         }
2917
2918         adapter->pdev = pdev;
2919         adapter->name = pci_name(pdev);
2920         adapter->msg_enable = dflt_msg_enable;
2921         adapter->mmio_len = mmio_len;
2922
2923         mutex_init(&adapter->mdio_lock);
2924         spin_lock_init(&adapter->work_lock);
2925         spin_lock_init(&adapter->stats_lock);
2926
2927         INIT_LIST_HEAD(&adapter->adapter_list);
2928         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2929         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
2930         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2931
        for (i = 0; i < ai->nports; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev(sizeof(struct port_info));
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                adapter->port[i] = netdev;
                pi = netdev_priv(netdev);
                pi->adapter = adapter;
                pi->rx_csum_offload = 1;
                pi->port_id = i;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
                netdev->features |= NETIF_F_LLTX;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;

                netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                netdev->netdev_ops = &cxgb_netdev_ops;
                SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
        }

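        /*
         * Finish common hardware/software initialization; the final
         * argument asks t3_prep_adapter() to reset the chip as part of
         * preparation.
         */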
        pci_set_drvdata(pdev, adapter);
        if (t3_prep_adapter(adapter, ai, 1) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for_each_port(adapter, i) {
                err = register_netdev(adapter->port[i]);
                if (err) {
                        dev_warn(&pdev->dev,
                                 "cannot register net device %s, skipping\n",
                                 adapter->port[i]->name);
                } else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i]->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                dev_err(&pdev->dev, "could not register any net devices\n");
                goto out_free_dev;
        }

        /* Driver's ready. Reflect it on LEDs */
        t3_led_ready(adapter);

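        /*
         * For offload-capable cards, register with the cxgb3 offload layer
         * so upper-layer drivers (RDMA, iSCSI) can attach to this adapter.
         */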
        if (is_offload(adapter)) {
                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
                cxgb3_adapter_ofld(adapter);
        }

        /*
         * See what interrupts we'll be using.  The "msi" module parameter
         * selects the preference: >1 tries MSI-X first, >0 tries MSI, and
         * 0 (or any allocation failure) leaves the device on legacy INTx.
         */
        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
                adapter->flags |= USING_MSIX;
        else if (msi > 0 && pci_enable_msi(pdev) == 0)
                adapter->flags |= USING_MSI;

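        /*
         * Distribute the available queue sets among the ports; the exact
         * split depends on the interrupt mode chosen above (see
         * set_nqsets()).
         */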
        set_nqsets(adapter);

        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                                 &cxgb3_attr_group);
        if (err)
                dev_warn(&pdev->dev, "cannot create sysfs attributes\n");

        print_port_info(adapter, ai);
        return 0;

out_free_dev:
        iounmap(adapter->regs);
        for (i = ai->nports - 1; i >= 0; --i)
                if (adapter->port[i])
                        free_netdev(adapter->port[i]);

out_free_adapter:
        kfree(adapter);

out_disable_device:
        pci_disable_device(pdev);
out_release_regions:
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

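/*
 * Undo everything init_one() set up, in reverse order: stop the SGE,
 * detach from the offload layer, unregister the net devices, release
 * the interrupt vectors, and finally hand the PCI resources back.
 */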
static void __devexit remove_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (adapter) {
                int i;

                t3_sge_stop(adapter);
                sysfs_remove_group(&adapter->port[0]->dev.kobj,
                                   &cxgb3_attr_group);

                if (is_offload(adapter)) {
                        cxgb3_adapter_unofld(adapter);
                        if (test_bit(OFFLOAD_DEVMAP_BIT,
                                     &adapter->open_device_map))
                                offload_close(&adapter->tdev);
                }

                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);

                t3_stop_sge_timers(adapter);
                t3_free_sge_resources(adapter);
                cxgb_disable_msi(adapter);

                for_each_port(adapter, i)
                        if (adapter->port[i])
                                free_netdev(adapter->port[i]);

                iounmap(adapter->regs);
                kfree(adapter);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

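/*
 * PCI driver glue.  The err_handler hooks t3_err_handler into the PCI
 * error-recovery framework so the adapter can be reset after a fatal
 * bus error.
 */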
static struct pci_driver driver = {
        .name = DRV_NAME,
        .id_table = cxgb3_pci_tbl,
        .probe = init_one,
        .remove = __devexit_p(remove_one),
        .err_handler = &t3_err_handler,
};

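/*
 * Module init: bring up the offload-layer bookkeeping first, then
 * register the PCI driver so probing can begin.
 */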
static int __init cxgb3_init_module(void)
{
        cxgb3_offload_init();

        return pci_register_driver(&driver);
}

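/*
 * Module exit.  The workqueue is created lazily by the first probe, so
 * it may still be NULL here if no adapter was ever found.
 */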
static void __exit cxgb3_cleanup_module(void)
{
        pci_unregister_driver(&driver);
        if (cxgb3_wq)
                destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);