drivers/net/cxgb3/cxgb3_main.c
1 /*
2  * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
48
49 #include "common.h"
50 #include "cxgb3_ioctl.h"
51 #include "regs.h"
52 #include "cxgb3_offload.h"
53 #include "version.h"
54
55 #include "cxgb3_ctl_defs.h"
56 #include "t3_cpl.h"
57 #include "firmware_exports.h"
58
59 enum {
60         MAX_TXQ_ENTRIES = 16384,
61         MAX_CTRL_TXQ_ENTRIES = 1024,
62         MAX_RSPQ_ENTRIES = 16384,
63         MAX_RX_BUFFERS = 16384,
64         MAX_RX_JUMBO_BUFFERS = 16384,
65         MIN_TXQ_ENTRIES = 4,
66         MIN_CTRL_TXQ_ENTRIES = 4,
67         MIN_RSPQ_ENTRIES = 32,
68         MIN_FL_ENTRIES = 32
69 };
70
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
72
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74                          NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
76
77 #define EEPROM_MAGIC 0x38E2F10C
78
79 #define CH_DEVICE(devid, idx) \
80         { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
81
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83         CH_DEVICE(0x20, 0),     /* PE9000 */
84         CH_DEVICE(0x21, 1),     /* T302E */
85         CH_DEVICE(0x22, 2),     /* T310E */
86         CH_DEVICE(0x23, 3),     /* T320X */
87         CH_DEVICE(0x24, 1),     /* T302X */
88         CH_DEVICE(0x25, 3),     /* T320E */
89         CH_DEVICE(0x26, 2),     /* T310X */
90         CH_DEVICE(0x30, 2),     /* T3B10 */
91         CH_DEVICE(0x31, 3),     /* T3B20 */
92         CH_DEVICE(0x32, 1),     /* T3B02 */
93         {0,}
94 };
95
96 MODULE_DESCRIPTION(DRV_DESC);
97 MODULE_AUTHOR("Chelsio Communications");
98 MODULE_LICENSE("Dual BSD/GPL");
99 MODULE_VERSION(DRV_VERSION);
100 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
101
102 static int dflt_msg_enable = DFLT_MSG_ENABLE;
103
104 module_param(dflt_msg_enable, int, 0644);
105 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
106
107 /*
108  * The driver uses the best interrupt scheme available on a platform in the
109  * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
110  * of these schemes the driver may consider as follows:
111  *
112  * msi = 2: choose from among all three options
113  * msi = 1: only consider MSI and pin interrupts
114  * msi = 0: force pin interrupts
115  */
116 static int msi = 2;
117
118 module_param(msi, int, 0644);
119 MODULE_PARM_DESC(msi, "interrupt scheme: 0 = pin only, 1 = MSI or pin, 2 = MSI-X, MSI, or pin");
120
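/*
 * A minimal sketch (not part of this driver; the function name and vector
 * count are hypothetical) of how probe-time code can honor "msi" by falling
 * back from MSI-X to MSI to legacy pin interrupts:
 */
#if 0
static void example_pick_intr_scheme(struct adapter *adap, int nvec)
{
        struct msix_entry entries[SGE_QSETS + 1];
        int i;

        for (i = 0; i < nvec; i++)
                entries[i].entry = i;

        if (msi > 1 && pci_enable_msix(adap->pdev, entries, nvec) == 0)
                adap->flags |= USING_MSIX;      /* preferred scheme */
        else if (msi > 0 && pci_enable_msi(adap->pdev) == 0)
                adap->flags |= USING_MSI;       /* next best */
        /* otherwise fall back to legacy pin interrupts */
}
#endif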
121 /*
122  * The driver enables offload by default.
123  * To disable it, use ofld_disable = 1.
124  */
125
126 static int ofld_disable = 0;
127
128 module_param(ofld_disable, int, 0644);
129 MODULE_PARM_DESC(ofld_disable, "whether to disable offload at init time (default 0, offload enabled)");
130
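/*
 * Example of setting the module parameters at load time (values are
 * illustrative only):
 *
 *     modprobe cxgb3 msi=1 ofld_disable=1 dflt_msg_enable=0x20
 */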
131 /*
132  * We have work elements that we need to cancel when an interface is taken
133  * down.  Normally the work elements would be executed by keventd but that
134  * can deadlock because of linkwatch.  If our close method takes the rtnl
135  * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136  * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137  * for our work to complete.  Get our own work queue to solve this.
138  */
139 static struct workqueue_struct *cxgb3_wq;
140
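/*
 * A sketch of this queue's life cycle; the actual create/destroy calls
 * live in the module init/exit paths, which are outside this excerpt:
 */
#if 0
        cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
        if (!cxgb3_wq)
                return -ENOMEM;         /* at module load */
        ...
        destroy_workqueue(cxgb3_wq);    /* at module unload */
#endif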
141 /**
142  *      link_report - show link status and link speed/duplex
143  *      @dev: the network device whose link status is reported
144  *
145  *      Shows the link status, speed, and duplex of a port.
146  */
147 static void link_report(struct net_device *dev)
148 {
149         if (!netif_carrier_ok(dev))
150                 printk(KERN_INFO "%s: link down\n", dev->name);
151         else {
152                 const char *s = "10Mbps";
153                 const struct port_info *p = netdev_priv(dev);
154
155                 switch (p->link_config.speed) {
156                 case SPEED_10000:
157                         s = "10Gbps";
158                         break;
159                 case SPEED_1000:
160                         s = "1000Mbps";
161                         break;
162                 case SPEED_100:
163                         s = "100Mbps";
164                         break;
165                 }
166
167                 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168                        p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
169         }
170 }
171
172 /**
173  *      t3_os_link_changed - handle link status changes
174  *      @adapter: the adapter associated with the link change
175  *      @port_id: the port index whose link status has changed
176  *      @link_stat: the new status of the link
177  *      @speed: the new speed setting
178  *      @duplex: the new duplex setting
179  *      @pause: the new flow-control setting
180  *
181  *      This is the OS-dependent handler for link status changes.  The OS
182  *      neutral handler takes care of most of the processing for these events,
183  *      then calls this handler for any OS-specific processing.
184  */
185 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186                         int speed, int duplex, int pause)
187 {
188         struct net_device *dev = adapter->port[port_id];
189         struct port_info *pi = netdev_priv(dev);
190         struct cmac *mac = &pi->mac;
191
192         /* Skip changes from disabled ports. */
193         if (!netif_running(dev))
194                 return;
195
196         if (link_stat != netif_carrier_ok(dev)) {
197                 if (link_stat) {
198                         t3_mac_enable(mac, MAC_DIRECTION_RX);
199                         netif_carrier_on(dev);
200                 } else {
201                         netif_carrier_off(dev);
202                         pi->phy.ops->power_down(&pi->phy, 1);
203                         t3_mac_disable(mac, MAC_DIRECTION_RX);
204                         t3_link_start(&pi->phy, mac, &pi->link_config);
205                 }
206
207                 link_report(dev);
208         }
209 }
210
211 /**
212  *      t3_os_phymod_changed - handle PHY module changes
213  *      @adap: the adapter whose PHY reported the module change
214  *      @port_id: the index of the port with the changed module
215  *
216  *      This is the OS-dependent handler for PHY module changes.  It is
217  *      invoked when a PHY module is removed or inserted for any OS-specific
218  *      processing.
219  */
220 void t3_os_phymod_changed(struct adapter *adap, int port_id)
221 {
222         static const char *mod_str[] = {
223                 NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
224         };
225
226         const struct net_device *dev = adap->port[port_id];
227         const struct port_info *pi = netdev_priv(dev);
228
229         if (pi->phy.modtype == phy_modtype_none)
230                 printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
231         else
232                 printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
233                        mod_str[pi->phy.modtype]);
234 }
235
236 static void cxgb_set_rxmode(struct net_device *dev)
237 {
238         struct t3_rx_mode rm;
239         struct port_info *pi = netdev_priv(dev);
240
241         init_rx_mode(&rm, dev, dev->mc_list);
242         t3_mac_set_rx_mode(&pi->mac, &rm);
243 }
244
245 /**
246  *      link_start - enable a port
247  *      @dev: the device to enable
248  *
249  *      Performs the MAC and PHY actions needed to enable a port.
250  */
251 static void link_start(struct net_device *dev)
252 {
253         struct t3_rx_mode rm;
254         struct port_info *pi = netdev_priv(dev);
255         struct cmac *mac = &pi->mac;
256
257         init_rx_mode(&rm, dev, dev->mc_list);
258         t3_mac_reset(mac);
259         t3_mac_set_mtu(mac, dev->mtu);
260         t3_mac_set_address(mac, 0, dev->dev_addr);
261         t3_mac_set_rx_mode(mac, &rm);
262         t3_link_start(&pi->phy, mac, &pi->link_config);
263         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
264 }
265
266 static inline void cxgb_disable_msi(struct adapter *adapter)
267 {
268         if (adapter->flags & USING_MSIX) {
269                 pci_disable_msix(adapter->pdev);
270                 adapter->flags &= ~USING_MSIX;
271         } else if (adapter->flags & USING_MSI) {
272                 pci_disable_msi(adapter->pdev);
273                 adapter->flags &= ~USING_MSI;
274         }
275 }
276
277 /*
278  * Interrupt handler for asynchronous events used with MSI-X.
279  */
280 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
281 {
282         t3_slow_intr_handler(cookie);
283         return IRQ_HANDLED;
284 }
285
286 /*
287  * Name the MSI-X interrupts.
288  */
289 static void name_msix_vecs(struct adapter *adap)
290 {
291         int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
292
293         snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
294         adap->msix_info[0].desc[n] = 0;
295
296         for_each_port(adap, j) {
297                 struct net_device *d = adap->port[j];
298                 const struct port_info *pi = netdev_priv(d);
299
300                 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
301                         snprintf(adap->msix_info[msi_idx].desc, n,
302                                  "%s-%d", d->name, pi->first_qset + i);
303                         adap->msix_info[msi_idx].desc[n] = 0;
304                 }
305         }
306 }
307
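/*
 * MSI-X vector 0 is reserved for slow-path/async events (see
 * t3_async_intr_handler above); vectors 1..N map one-to-one onto the
 * SGE queue sets, which is why the code below indexes msix_info at
 * qidx + 1.
 */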
308 static int request_msix_data_irqs(struct adapter *adap)
309 {
310         int i, j, err, qidx = 0;
311
312         for_each_port(adap, i) {
313                 int nqsets = adap2pinfo(adap, i)->nqsets;
314
315                 for (j = 0; j < nqsets; ++j) {
316                         err = request_irq(adap->msix_info[qidx + 1].vec,
317                                           t3_intr_handler(adap,
318                                                           adap->sge.qs[qidx].
319                                                           rspq.polling), 0,
320                                           adap->msix_info[qidx + 1].desc,
321                                           &adap->sge.qs[qidx]);
322                         if (err) {
323                                 while (--qidx >= 0)
324                                         free_irq(adap->msix_info[qidx + 1].vec,
325                                                  &adap->sge.qs[qidx]);
326                                 return err;
327                         }
328                         qidx++;
329                 }
330         }
331         return 0;
332 }
333
334 static void free_irq_resources(struct adapter *adapter)
335 {
336         if (adapter->flags & USING_MSIX) {
337                 int i, n = 0;
338
339                 free_irq(adapter->msix_info[0].vec, adapter);
340                 for_each_port(adapter, i)
341                     n += adap2pinfo(adapter, i)->nqsets;
342
343                 for (i = 0; i < n; ++i)
344                         free_irq(adapter->msix_info[i + 1].vec,
345                                  &adapter->sge.qs[i]);
346         } else
347                 free_irq(adapter->pdev->irq, adapter);
348 }
349
350 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
351                               unsigned long n)
352 {
353         int attempts = 5;
354
355         while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
356                 if (!--attempts)
357                         return -ETIMEDOUT;
358                 msleep(10);
359         }
360         return 0;
361 }
362
363 static int init_tp_parity(struct adapter *adap)
364 {
365         int i;
366         struct sk_buff *skb;
367         struct cpl_set_tcb_field *greq;
368         unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
369
370         t3_tp_set_offload_mode(adap, 1);
371
372         for (i = 0; i < 16; i++) {
373                 struct cpl_smt_write_req *req;
374
375                 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
376                 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
377                 memset(req, 0, sizeof(*req));
378                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
379                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
380                 req->iff = i;
381                 t3_mgmt_tx(adap, skb);
382         }
383
384         for (i = 0; i < 2048; i++) {
385                 struct cpl_l2t_write_req *req;
386
387                 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
388                 req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
389                 memset(req, 0, sizeof(*req));
390                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
391                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
392                 req->params = htonl(V_L2T_W_IDX(i));
393                 t3_mgmt_tx(adap, skb);
394         }
395
396         for (i = 0; i < 2048; i++) {
397                 struct cpl_rte_write_req *req;
398
399                 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
400                 req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
401                 memset(req, 0, sizeof(*req));
402                 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
403                 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
404                 req->l2t_idx = htonl(V_L2T_W_IDX(i));
405                 t3_mgmt_tx(adap, skb);
406         }
407
408         skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
409         greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
410         memset(greq, 0, sizeof(*greq));
411         greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
412         OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
413         greq->mask = cpu_to_be64(1);
414         t3_mgmt_tx(adap, skb);
415
416         i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
417         t3_tp_set_offload_mode(adap, 0);
418         return i;
419 }
420
421 /**
422  *      setup_rss - configure RSS
423  *      @adap: the adapter
424  *
425  *      Sets up RSS to distribute packets to multiple receive queues.  We
426  *      configure the RSS CPU lookup table to distribute to the number of HW
427  *      receive queues, and the response queue lookup table to narrow that
428  *      down to the response queues actually configured for each port.
429  *      We always configure the RSS mapping for two ports since the mapping
430  *      table has plenty of entries.
431  */
432 static void setup_rss(struct adapter *adap)
433 {
434         int i;
435         unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
436         unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
437         u8 cpus[SGE_QSETS + 1];
438         u16 rspq_map[RSS_TABLE_SIZE];
439
440         for (i = 0; i < SGE_QSETS; ++i)
441                 cpus[i] = i;
442         cpus[SGE_QSETS] = 0xff; /* terminator */
443
444         for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
445                 rspq_map[i] = i % nq0;
446                 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
447         }
448
449         t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
450                       F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
451                       V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
452 }
453
454 static void init_napi(struct adapter *adap)
455 {
456         int i;
457
458         for (i = 0; i < SGE_QSETS; i++) {
459                 struct sge_qset *qs = &adap->sge.qs[i];
460
461                 if (qs->adap)
462                         netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
463                                        64);
464         }
465
466         /*
467          * netif_napi_add() can be called only once per napi_struct because it
468          * adds each new napi_struct to a list.  Be careful not to call it a
469          * second time, e.g., during EEH recovery, by making a note of it.
470          */
471         adap->flags |= NAPI_INIT;
472 }
473
474 /*
475  * Wait until all NAPI handlers are descheduled.  This includes the handlers of
476  * both netdevices representing interfaces and the dummy ones for the extra
477  * queues.
478  */
479 static void quiesce_rx(struct adapter *adap)
480 {
481         int i;
482
483         for (i = 0; i < SGE_QSETS; i++)
484                 if (adap->sge.qs[i].adap)
485                         napi_disable(&adap->sge.qs[i].napi);
486 }
487
488 static void enable_all_napi(struct adapter *adap)
489 {
490         int i;
491         for (i = 0; i < SGE_QSETS; i++)
492                 if (adap->sge.qs[i].adap)
493                         napi_enable(&adap->sge.qs[i].napi);
494 }
495
496 /**
497  *      set_qset_lro - Turn a queue set's LRO capability on and off
498  *      @dev: the device the qset is attached to
499  *      @qset_idx: the queue set index
500  *      @val: the LRO switch
501  *
502  *      Sets LRO on or off for a particular queue set.
503  *      The device's features flag is updated to reflect the LRO
504  *      capability only when all queues belonging to the device are
505  *      in the same state.
506  */
507 static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
508 {
509         struct port_info *pi = netdev_priv(dev);
510         struct adapter *adapter = pi->adapter;
511         int i, lro_on = 1;
512
513         adapter->params.sge.qset[qset_idx].lro = !!val;
514         adapter->sge.qs[qset_idx].lro_enabled = !!val;
515
516         /* let ethtool report LRO on only if all queues are LRO enabled */
517         for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; ++i)
518                 lro_on &= adapter->params.sge.qset[i].lro;
519
520         if (lro_on)
521                 dev->features |= NETIF_F_LRO;
522         else
523                 dev->features &= ~NETIF_F_LRO;
524 }
525
526 /**
527  *      setup_sge_qsets - configure SGE Tx/Rx/response queues
528  *      @adap: the adapter
529  *
530  *      Determines how many sets of SGE queues to use and initializes them.
531  *      We support multiple queue sets per port if we have MSI-X, otherwise
532  *      just one queue set per port.
533  */
534 static int setup_sge_qsets(struct adapter *adap)
535 {
536         int i, j, err, irq_idx = 0, qset_idx = 0;
537         unsigned int ntxq = SGE_TXQ_PER_SET;
538
539         if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
540                 irq_idx = -1;
541
542         for_each_port(adap, i) {
543                 struct net_device *dev = adap->port[i];
544                 struct port_info *pi = netdev_priv(dev);
545
546                 pi->qs = &adap->sge.qs[pi->first_qset];
547                 for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
548                      ++j, ++qset_idx) {
549                         set_qset_lro(dev, qset_idx, pi->rx_csum_offload);
550                         err = t3_sge_alloc_qset(adap, qset_idx, 1,
551                                 (adap->flags & USING_MSIX) ? qset_idx + 1 :
552                                                              irq_idx,
553                                 &adap->params.sge.qset[qset_idx], ntxq, dev);
554                         if (err) {
555                                 t3_stop_sge_timers(adap);
556                                 t3_free_sge_resources(adap);
557                                 return err;
558                         }
559                 }
560         }
561
562         return 0;
563 }
564
565 static ssize_t attr_show(struct device *d, char *buf,
566                          ssize_t(*format) (struct net_device *, char *))
567 {
568         ssize_t len;
569
570         /* Synchronize with ioctls that may shut down the device */
571         rtnl_lock();
572         len = (*format) (to_net_dev(d), buf);
573         rtnl_unlock();
574         return len;
575 }
576
577 static ssize_t attr_store(struct device *d,
578                           const char *buf, size_t len,
579                           ssize_t(*set) (struct net_device *, unsigned int),
580                           unsigned int min_val, unsigned int max_val)
581 {
582         char *endp;
583         ssize_t ret;
584         unsigned int val;
585
586         if (!capable(CAP_NET_ADMIN))
587                 return -EPERM;
588
589         val = simple_strtoul(buf, &endp, 0);
590         if (endp == buf || val < min_val || val > max_val)
591                 return -EINVAL;
592
593         rtnl_lock();
594         ret = (*set) (to_net_dev(d), val);
595         if (!ret)
596                 ret = len;
597         rtnl_unlock();
598         return ret;
599 }
600
601 #define CXGB3_SHOW(name, val_expr) \
602 static ssize_t format_##name(struct net_device *dev, char *buf) \
603 { \
604         struct port_info *pi = netdev_priv(dev); \
605         struct adapter *adap = pi->adapter; \
606         return sprintf(buf, "%u\n", val_expr); \
607 } \
608 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
609                            char *buf) \
610 { \
611         return attr_show(d, buf, format_##name); \
612 }
613
614 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
615 {
616         struct port_info *pi = netdev_priv(dev);
617         struct adapter *adap = pi->adapter;
618         int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
619
620         if (adap->flags & FULL_INIT_DONE)
621                 return -EBUSY;
622         if (val && adap->params.rev == 0)
623                 return -EINVAL;
624         if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
625             min_tids)
626                 return -EINVAL;
627         adap->params.mc5.nfilters = val;
628         return 0;
629 }
630
631 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
632                               const char *buf, size_t len)
633 {
634         return attr_store(d, buf, len, set_nfilters, 0, ~0);
635 }
636
637 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
638 {
639         struct port_info *pi = netdev_priv(dev);
640         struct adapter *adap = pi->adapter;
641
642         if (adap->flags & FULL_INIT_DONE)
643                 return -EBUSY;
644         if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
645             MC5_MIN_TIDS)
646                 return -EINVAL;
647         adap->params.mc5.nservers = val;
648         return 0;
649 }
650
651 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
652                               const char *buf, size_t len)
653 {
654         return attr_store(d, buf, len, set_nservers, 0, ~0);
655 }
656
657 #define CXGB3_ATTR_R(name, val_expr) \
658 CXGB3_SHOW(name, val_expr) \
659 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
660
661 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
662 CXGB3_SHOW(name, val_expr) \
663 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
664
665 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
666 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
667 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
668
669 static struct attribute *cxgb3_attrs[] = {
670         &dev_attr_cam_size.attr,
671         &dev_attr_nfilters.attr,
672         &dev_attr_nservers.attr,
673         NULL
674 };
675
676 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
677
678 static ssize_t tm_attr_show(struct device *d,
679                             char *buf, int sched)
680 {
681         struct port_info *pi = netdev_priv(to_net_dev(d));
682         struct adapter *adap = pi->adapter;
683         unsigned int v, addr, bpt, cpt;
684         ssize_t len;
685
686         addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
687         rtnl_lock();
688         t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
689         v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
690         if (sched & 1)
691                 v >>= 16;
692         bpt = (v >> 8) & 0xff;
693         cpt = v & 0xff;
694         if (!cpt)
695                 len = sprintf(buf, "disabled\n");
696         else {
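                /*
                 * bpt bytes are sent every cpt core-clock ticks.  Assuming
                 * cclk is in kHz (consistent with the Kbps result below),
                 * v is the number of scheduler increments per second, and
                 * v * bpt / 125 converts bytes/sec to Kbps.
                 */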
697                 v = (adap->params.vpd.cclk * 1000) / cpt;
698                 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
699         }
700         rtnl_unlock();
701         return len;
702 }
703
704 static ssize_t tm_attr_store(struct device *d,
705                              const char *buf, size_t len, int sched)
706 {
707         struct port_info *pi = netdev_priv(to_net_dev(d));
708         struct adapter *adap = pi->adapter;
709         unsigned int val;
710         char *endp;
711         ssize_t ret;
712
713         if (!capable(CAP_NET_ADMIN))
714                 return -EPERM;
715
716         val = simple_strtoul(buf, &endp, 0);
717         if (endp == buf || val > 10000000)
718                 return -EINVAL;
719
720         rtnl_lock();
721         ret = t3_config_sched(adap, val, sched);
722         if (!ret)
723                 ret = len;
724         rtnl_unlock();
725         return ret;
726 }
727
728 #define TM_ATTR(name, sched) \
729 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
730                            char *buf) \
731 { \
732         return tm_attr_show(d, buf, sched); \
733 } \
734 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
735                             const char *buf, size_t len) \
736 { \
737         return tm_attr_store(d, buf, len, sched); \
738 } \
739 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
740
741 TM_ATTR(sched0, 0);
742 TM_ATTR(sched1, 1);
743 TM_ATTR(sched2, 2);
744 TM_ATTR(sched3, 3);
745 TM_ATTR(sched4, 4);
746 TM_ATTR(sched5, 5);
747 TM_ATTR(sched6, 6);
748 TM_ATTR(sched7, 7);
749
750 static struct attribute *offload_attrs[] = {
751         &dev_attr_sched0.attr,
752         &dev_attr_sched1.attr,
753         &dev_attr_sched2.attr,
754         &dev_attr_sched3.attr,
755         &dev_attr_sched4.attr,
756         &dev_attr_sched5.attr,
757         &dev_attr_sched6.attr,
758         &dev_attr_sched7.attr,
759         NULL
760 };
761
762 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
763
764 /*
765  * Sends an sk_buff to an offload queue driver
766  * after dealing with any active network taps.
767  */
768 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
769 {
770         int ret;
771
772         local_bh_disable();
773         ret = t3_offload_tx(tdev, skb);
774         local_bh_enable();
775         return ret;
776 }
777
778 static int write_smt_entry(struct adapter *adapter, int idx)
779 {
780         struct cpl_smt_write_req *req;
781         struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
782
783         if (!skb)
784                 return -ENOMEM;
785
786         req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
787         req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
788         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
789         req->mtu_idx = NMTUS - 1;       /* should be 0 but there's a T3 bug */
790         req->iff = idx;
791         memset(req->src_mac1, 0, sizeof(req->src_mac1));
792         memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
793         skb->priority = 1;
794         offload_tx(&adapter->tdev, skb);
795         return 0;
796 }
797
798 static int init_smt(struct adapter *adapter)
799 {
800         int i;
801
802         for_each_port(adapter, i)
803             write_smt_entry(adapter, i);
804         return 0;
805 }
806
807 static void init_port_mtus(struct adapter *adapter)
808 {
809         unsigned int mtus = adapter->port[0]->mtu;
810
811         if (adapter->port[1])
812                 mtus |= adapter->port[1]->mtu << 16;
813         t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
814 }
815
816 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
817                               int hi, int port)
818 {
819         struct sk_buff *skb;
820         struct mngt_pktsched_wr *req;
821         int ret;
822
823         skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
824         req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
825         req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
826         req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
827         req->sched = sched;
828         req->idx = qidx;
829         req->min = lo;
830         req->max = hi;
831         req->binding = port;
832         ret = t3_mgmt_tx(adap, skb);
833
834         return ret;
835 }
836
837 static int bind_qsets(struct adapter *adap)
838 {
839         int i, j, err = 0;
840
841         for_each_port(adap, i) {
842                 const struct port_info *pi = adap2pinfo(adap, i);
843
844                 for (j = 0; j < pi->nqsets; ++j) {
845                         int ret = send_pktsched_cmd(adap, 1,
846                                                     pi->first_qset + j, -1,
847                                                     -1, i);
848                         if (ret)
849                                 err = ret;
850                 }
851         }
852
853         return err;
854 }
855
856 #define FW_FNAME "t3fw-%d.%d.%d.bin"
857 #define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
858
859 static int upgrade_fw(struct adapter *adap)
860 {
861         int ret;
862         char buf[64];
863         const struct firmware *fw;
864         struct device *dev = &adap->pdev->dev;
865
866         snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
867                  FW_VERSION_MINOR, FW_VERSION_MICRO);
868         ret = request_firmware(&fw, buf, dev);
869         if (ret < 0) {
870                 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
871                         buf);
872                 return ret;
873         }
874         ret = t3_load_fw(adap, fw->data, fw->size);
875         release_firmware(fw);
876
877         if (ret == 0)
878                 dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
879                          FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
880         else
881                 dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
882                         FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
883
884         return ret;
885 }
886
887 static inline char t3rev2char(struct adapter *adapter)
888 {
889         char rev = 0;
890
891         switch(adapter->params.rev) {
892         case T3_REV_B:
893         case T3_REV_B2:
894                 rev = 'b';
895                 break;
896         case T3_REV_C:
897                 rev = 'c';
898                 break;
899         }
900         return rev;
901 }
902
903 static int update_tpsram(struct adapter *adap)
904 {
905         const struct firmware *tpsram;
906         char buf[64];
907         struct device *dev = &adap->pdev->dev;
908         int ret;
909         char rev;
910
911         rev = t3rev2char(adap);
912         if (!rev)
913                 return 0;
914
915         snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
916                  TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
917
918         ret = request_firmware(&tpsram, buf, dev);
919         if (ret < 0) {
920                 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
921                         buf);
922                 return ret;
923         }
924
925         ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
926         if (ret)
927                 goto release_tpsram;
928
929         ret = t3_set_proto_sram(adap, tpsram->data);
930         if (ret == 0)
931                 dev_info(dev,
932                          "successful update of protocol engine "
933                          "to %d.%d.%d\n",
934                          TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
935         else
936                 dev_err(dev, "failed to update protocol engine to %d.%d.%d\n",
937                         TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
938         if (ret)
939                 dev_err(dev, "loading protocol SRAM failed\n");
940
941 release_tpsram:
942         release_firmware(tpsram);
943
944         return ret;
945 }
946
947 /**
948  *      cxgb_up - enable the adapter
949  *      @adap: the adapter being enabled
950  *
951  *      Called when the first port is enabled, this function performs the
952  *      actions necessary to make an adapter operational, such as completing
953  *      the initialization of HW modules, and enabling interrupts.
954  *
955  *      Must be called with the rtnl lock held.
956  */
957 static int cxgb_up(struct adapter *adap)
958 {
959         int err;
960         int must_load;
961
962         if (!(adap->flags & FULL_INIT_DONE)) {
963                 err = t3_check_fw_version(adap, &must_load);
964                 if (err == -EINVAL) {
965                         err = upgrade_fw(adap);
966                         if (err && must_load)
967                                 goto out;
968                 }
969
970                 err = t3_check_tpsram_version(adap, &must_load);
971                 if (err == -EINVAL) {
972                         err = update_tpsram(adap);
973                         if (err && must_load)
974                                 goto out;
975                 }
976
977                 /*
978                  * Clear interrupts now to catch errors if t3_init_hw fails.
979                  * We clear them again later as initialization may trigger
980                  * conditions that can interrupt.
981                  */
982                 t3_intr_clear(adap);
983
984                 err = t3_init_hw(adap, 0);
985                 if (err)
986                         goto out;
987
988                 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
989                 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
990
991                 err = setup_sge_qsets(adap);
992                 if (err)
993                         goto out;
994
995                 setup_rss(adap);
996                 if (!(adap->flags & NAPI_INIT))
997                         init_napi(adap);
998                 adap->flags |= FULL_INIT_DONE;
999         }
1000
1001         t3_intr_clear(adap);
1002
1003         if (adap->flags & USING_MSIX) {
1004                 name_msix_vecs(adap);
1005                 err = request_irq(adap->msix_info[0].vec,
1006                                   t3_async_intr_handler, 0,
1007                                   adap->msix_info[0].desc, adap);
1008                 if (err)
1009                         goto irq_err;
1010
1011                 err = request_msix_data_irqs(adap);
1012                 if (err) {
1013                         free_irq(adap->msix_info[0].vec, adap);
1014                         goto irq_err;
1015                 }
1016         } else if ((err = request_irq(adap->pdev->irq,
1017                                       t3_intr_handler(adap,
1018                                                       adap->sge.qs[0].rspq.
1019                                                       polling),
1020                                       (adap->flags & USING_MSI) ?
1021                                        0 : IRQF_SHARED,
1022                                       adap->name, adap)))
1023                 goto irq_err;
1024
1025         enable_all_napi(adap);
1026         t3_sge_start(adap);
1027         t3_intr_enable(adap);
1028
1029         if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
1030             is_offload(adap) && init_tp_parity(adap) == 0)
1031                 adap->flags |= TP_PARITY_INIT;
1032
1033         if (adap->flags & TP_PARITY_INIT) {
1034                 t3_write_reg(adap, A_TP_INT_CAUSE,
1035                              F_CMCACHEPERR | F_ARPLUTPERR);
1036                 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
1037         }
1038
1039         if (!(adap->flags & QUEUES_BOUND)) {
1040                 err = bind_qsets(adap);
1041                 if (err) {
1042                         CH_ERR(adap, "failed to bind qsets, err %d\n", err);
1043                         t3_intr_disable(adap);
1044                         free_irq_resources(adap);
1045                         goto out;
1046                 }
1047                 adap->flags |= QUEUES_BOUND;
1048         }
1049
1050 out:
1051         return err;
1052 irq_err:
1053         CH_ERR(adap, "request_irq failed, err %d\n", err);
1054         goto out;
1055 }
1056
1057 /*
1058  * Release resources when all the ports and offloading have been stopped.
1059  */
1060 static void cxgb_down(struct adapter *adapter)
1061 {
1062         t3_sge_stop(adapter);
1063         spin_lock_irq(&adapter->work_lock);     /* sync with PHY intr task */
1064         t3_intr_disable(adapter);
1065         spin_unlock_irq(&adapter->work_lock);
1066
1067         free_irq_resources(adapter);
1068         flush_workqueue(cxgb3_wq);      /* wait for external IRQ handler */
1069         quiesce_rx(adapter);
1070 }
1071
1072 static void schedule_chk_task(struct adapter *adap)
1073 {
1074         unsigned int timeo;
1075
1076         timeo = adap->params.linkpoll_period ?
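        /*
         * linkpoll_period is apparently in tenths of a second (note the
         * division by 10 below), while stats_update_period is in seconds.
         */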
1077             (HZ * adap->params.linkpoll_period) / 10 :
1078             adap->params.stats_update_period * HZ;
1079         if (timeo)
1080                 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
1081 }
1082
1083 static int offload_open(struct net_device *dev)
1084 {
1085         struct port_info *pi = netdev_priv(dev);
1086         struct adapter *adapter = pi->adapter;
1087         struct t3cdev *tdev = dev2t3cdev(dev);
1088         int adap_up = adapter->open_device_map & PORT_MASK;
1089         int err;
1090
1091         if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1092                 return 0;
1093
1094         if (!adap_up && (err = cxgb_up(adapter)) < 0)
1095                 goto out;
1096
1097         t3_tp_set_offload_mode(adapter, 1);
1098         tdev->lldev = adapter->port[0];
1099         err = cxgb3_offload_activate(adapter);
1100         if (err)
1101                 goto out;
1102
1103         init_port_mtus(adapter);
1104         t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
1105                      adapter->params.b_wnd,
1106                      adapter->params.rev == 0 ?
1107                      adapter->port[0]->mtu : 0xffff);
1108         init_smt(adapter);
1109
1110         if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1111                 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1112
1113         /* Call back all registered clients */
1114         cxgb3_add_clients(tdev);
1115
1116 out:
1117         /* undo the offload state changes above if activation failed */
1118         if (err) {
1119                 t3_tp_set_offload_mode(adapter, 0);
1120                 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1121                 cxgb3_set_dummy_ops(tdev);
1122         }
1123         return err;
1124 }
1125
1126 static int offload_close(struct t3cdev *tdev)
1127 {
1128         struct adapter *adapter = tdev2adap(tdev);
1129
1130         if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1131                 return 0;
1132
1133         /* Call back all registered clients */
1134         cxgb3_remove_clients(tdev);
1135
1136         sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1137
1138         tdev->lldev = NULL;
1139         cxgb3_set_dummy_ops(tdev);
1140         t3_tp_set_offload_mode(adapter, 0);
1141         clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
1142
1143         if (!adapter->open_device_map)
1144                 cxgb_down(adapter);
1145
1146         cxgb3_offload_deactivate(adapter);
1147         return 0;
1148 }
1149
1150 static int cxgb_open(struct net_device *dev)
1151 {
1152         struct port_info *pi = netdev_priv(dev);
1153         struct adapter *adapter = pi->adapter;
1154         int other_ports = adapter->open_device_map & PORT_MASK;
1155         int err;
1156
1157         if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
1158                 return err;
1159
1160         set_bit(pi->port_id, &adapter->open_device_map);
1161         if (is_offload(adapter) && !ofld_disable) {
1162                 err = offload_open(dev);
1163                 if (err)
1164                         printk(KERN_WARNING
1165                                "Could not initialize offload capabilities\n");
1166         }
1167
1168         link_start(dev);
1169         t3_port_intr_enable(adapter, pi->port_id);
1170         netif_start_queue(dev);
1171         if (!other_ports)
1172                 schedule_chk_task(adapter);
1173
1174         return 0;
1175 }
1176
1177 static int cxgb_close(struct net_device *dev)
1178 {
1179         struct port_info *pi = netdev_priv(dev);
1180         struct adapter *adapter = pi->adapter;
1181
1182         t3_port_intr_disable(adapter, pi->port_id);
1183         netif_stop_queue(dev);
1184         pi->phy.ops->power_down(&pi->phy, 1);
1185         netif_carrier_off(dev);
1186         t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
1187
1188         spin_lock_irq(&adapter->work_lock);     /* sync with update task */
1189         clear_bit(pi->port_id, &adapter->open_device_map);
1190         spin_unlock_irq(&adapter->work_lock);
1191
1192         if (!(adapter->open_device_map & PORT_MASK))
1193                 cancel_rearming_delayed_workqueue(cxgb3_wq,
1194                                                   &adapter->adap_check_task);
1195
1196         if (!adapter->open_device_map)
1197                 cxgb_down(adapter);
1198
1199         return 0;
1200 }
1201
1202 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
1203 {
1204         struct port_info *pi = netdev_priv(dev);
1205         struct adapter *adapter = pi->adapter;
1206         struct net_device_stats *ns = &pi->netstats;
1207         const struct mac_stats *pstats;
1208
1209         spin_lock(&adapter->stats_lock);
1210         pstats = t3_mac_update_stats(&pi->mac);
1211         spin_unlock(&adapter->stats_lock);
1212
1213         ns->tx_bytes = pstats->tx_octets;
1214         ns->tx_packets = pstats->tx_frames;
1215         ns->rx_bytes = pstats->rx_octets;
1216         ns->rx_packets = pstats->rx_frames;
1217         ns->multicast = pstats->rx_mcast_frames;
1218
1219         ns->tx_errors = pstats->tx_underrun;
1220         ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
1221             pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
1222             pstats->rx_fifo_ovfl;
1223
1224         /* detailed rx_errors */
1225         ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1226         ns->rx_over_errors = 0;
1227         ns->rx_crc_errors = pstats->rx_fcs_errs;
1228         ns->rx_frame_errors = pstats->rx_symbol_errs;
1229         ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1230         ns->rx_missed_errors = pstats->rx_cong_drops;
1231
1232         /* detailed tx_errors */
1233         ns->tx_aborted_errors = 0;
1234         ns->tx_carrier_errors = 0;
1235         ns->tx_fifo_errors = pstats->tx_underrun;
1236         ns->tx_heartbeat_errors = 0;
1237         ns->tx_window_errors = 0;
1238         return ns;
1239 }
1240
1241 static u32 get_msglevel(struct net_device *dev)
1242 {
1243         struct port_info *pi = netdev_priv(dev);
1244         struct adapter *adapter = pi->adapter;
1245
1246         return adapter->msg_enable;
1247 }
1248
1249 static void set_msglevel(struct net_device *dev, u32 val)
1250 {
1251         struct port_info *pi = netdev_priv(dev);
1252         struct adapter *adapter = pi->adapter;
1253
1254         adapter->msg_enable = val;
1255 }
1256
1257 static char stats_strings[][ETH_GSTRING_LEN] = {
1258         "TxOctetsOK         ",
1259         "TxFramesOK         ",
1260         "TxMulticastFramesOK",
1261         "TxBroadcastFramesOK",
1262         "TxPauseFrames      ",
1263         "TxUnderrun         ",
1264         "TxExtUnderrun      ",
1265
1266         "TxFrames64         ",
1267         "TxFrames65To127    ",
1268         "TxFrames128To255   ",
1269         "TxFrames256To511   ",
1270         "TxFrames512To1023  ",
1271         "TxFrames1024To1518 ",
1272         "TxFrames1519ToMax  ",
1273
1274         "RxOctetsOK         ",
1275         "RxFramesOK         ",
1276         "RxMulticastFramesOK",
1277         "RxBroadcastFramesOK",
1278         "RxPauseFrames      ",
1279         "RxFCSErrors        ",
1280         "RxSymbolErrors     ",
1281         "RxShortErrors      ",
1282         "RxJabberErrors     ",
1283         "RxLengthErrors     ",
1284         "RxFIFOoverflow     ",
1285
1286         "RxFrames64         ",
1287         "RxFrames65To127    ",
1288         "RxFrames128To255   ",
1289         "RxFrames256To511   ",
1290         "RxFrames512To1023  ",
1291         "RxFrames1024To1518 ",
1292         "RxFrames1519ToMax  ",
1293
1294         "PhyFIFOErrors      ",
1295         "TSO                ",
1296         "VLANextractions    ",
1297         "VLANinsertions     ",
1298         "TxCsumOffload      ",
1299         "RxCsumGood         ",
1300         "LroAggregated      ",
1301         "LroFlushed         ",
1302         "LroNoDesc          ",
1303         "RxDrops            ",
1304
1305         "CheckTXEnToggled   ",
1306         "CheckResets        ",
1307
1308 };
1309
1310 static int get_sset_count(struct net_device *dev, int sset)
1311 {
1312         switch (sset) {
1313         case ETH_SS_STATS:
1314                 return ARRAY_SIZE(stats_strings);
1315         default:
1316                 return -EOPNOTSUPP;
1317         }
1318 }
1319
1320 #define T3_REGMAP_SIZE (3 * 1024)
1321
1322 static int get_regs_len(struct net_device *dev)
1323 {
1324         return T3_REGMAP_SIZE;
1325 }
1326
1327 static int get_eeprom_len(struct net_device *dev)
1328 {
1329         return EEPROMSIZE;
1330 }
1331
1332 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1333 {
1334         struct port_info *pi = netdev_priv(dev);
1335         struct adapter *adapter = pi->adapter;
1336         u32 fw_vers = 0;
1337         u32 tp_vers = 0;
1338
1339         t3_get_fw_version(adapter, &fw_vers);
1340         t3_get_tp_version(adapter, &tp_vers);
1341
1342         strcpy(info->driver, DRV_NAME);
1343         strcpy(info->version, DRV_VERSION);
1344         strcpy(info->bus_info, pci_name(adapter->pdev));
1345         if (!fw_vers)
1346                 strcpy(info->fw_version, "N/A");
1347         else {
1348                 snprintf(info->fw_version, sizeof(info->fw_version),
1349                          "%s %u.%u.%u TP %u.%u.%u",
1350                          G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1351                          G_FW_VERSION_MAJOR(fw_vers),
1352                          G_FW_VERSION_MINOR(fw_vers),
1353                          G_FW_VERSION_MICRO(fw_vers),
1354                          G_TP_VERSION_MAJOR(tp_vers),
1355                          G_TP_VERSION_MINOR(tp_vers),
1356                          G_TP_VERSION_MICRO(tp_vers));
1357         }
1358 }
1359
1360 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1361 {
1362         if (stringset == ETH_SS_STATS)
1363                 memcpy(data, stats_strings, sizeof(stats_strings));
1364 }
1365
1366 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1367                                             struct port_info *p, int idx)
1368 {
1369         int i;
1370         unsigned long tot = 0;
1371
1372         for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
1373                 tot += adapter->sge.qs[i].port_stats[idx];
1374         return tot;
1375 }
1376
1377 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1378                       u64 *data)
1379 {
1380         struct port_info *pi = netdev_priv(dev);
1381         struct adapter *adapter = pi->adapter;
1382         const struct mac_stats *s;
1383
1384         spin_lock(&adapter->stats_lock);
1385         s = t3_mac_update_stats(&pi->mac);
1386         spin_unlock(&adapter->stats_lock);
1387
1388         *data++ = s->tx_octets;
1389         *data++ = s->tx_frames;
1390         *data++ = s->tx_mcast_frames;
1391         *data++ = s->tx_bcast_frames;
1392         *data++ = s->tx_pause;
1393         *data++ = s->tx_underrun;
1394         *data++ = s->tx_fifo_urun;
1395
1396         *data++ = s->tx_frames_64;
1397         *data++ = s->tx_frames_65_127;
1398         *data++ = s->tx_frames_128_255;
1399         *data++ = s->tx_frames_256_511;
1400         *data++ = s->tx_frames_512_1023;
1401         *data++ = s->tx_frames_1024_1518;
1402         *data++ = s->tx_frames_1519_max;
1403
1404         *data++ = s->rx_octets;
1405         *data++ = s->rx_frames;
1406         *data++ = s->rx_mcast_frames;
1407         *data++ = s->rx_bcast_frames;
1408         *data++ = s->rx_pause;
1409         *data++ = s->rx_fcs_errs;
1410         *data++ = s->rx_symbol_errs;
1411         *data++ = s->rx_short;
1412         *data++ = s->rx_jabber;
1413         *data++ = s->rx_too_long;
1414         *data++ = s->rx_fifo_ovfl;
1415
1416         *data++ = s->rx_frames_64;
1417         *data++ = s->rx_frames_65_127;
1418         *data++ = s->rx_frames_128_255;
1419         *data++ = s->rx_frames_256_511;
1420         *data++ = s->rx_frames_512_1023;
1421         *data++ = s->rx_frames_1024_1518;
1422         *data++ = s->rx_frames_1519_max;
1423
1424         *data++ = pi->phy.fifo_errors;
1425
1426         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1427         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1428         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1429         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1430         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1431         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_AGGR);
1432         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_FLUSHED);
1433         *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_LRO_NO_DESC);
1434         *data++ = s->rx_cong_drops;
1435
1436         *data++ = s->num_toggled;
1437         *data++ = s->num_resets;
1438 }
1439
1440 static inline void reg_block_dump(struct adapter *ap, void *buf,
1441                                   unsigned int start, unsigned int end)
1442 {
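        /*
         * buf + start relies on GCC's arithmetic on void pointers: each
         * register's value lands at the byte offset equal to its
         * register address within the dump buffer.
         */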
1443         u32 *p = buf + start;
1444
1445         for (; start <= end; start += sizeof(u32))
1446                 *p++ = t3_read_reg(ap, start);
1447 }
1448
1449 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1450                      void *buf)
1451 {
1452         struct port_info *pi = netdev_priv(dev);
1453         struct adapter *ap = pi->adapter;
1454
1455         /*
1456          * Version scheme:
1457          * bits 0..9: chip version
1458          * bits 10..15: chip revision
1459          * bit 31: set for PCIe cards
1460          */
1461         regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1462
1463         /*
1464          * We skip the MAC statistics registers because they are clear-on-read.
1465          * Also reading multi-register stats would need to synchronize with the
1466          * periodic mac stats accumulation.  Hard to justify the complexity.
1467          */
1468         memset(buf, 0, T3_REGMAP_SIZE);
1469         reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1470         reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1471         reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1472         reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1473         reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1474         reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1475                        XGM_REG(A_XGM_SERDES_STAT3, 1));
1476         reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1477                        XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1478 }
1479
1480 static int restart_autoneg(struct net_device *dev)
1481 {
1482         struct port_info *p = netdev_priv(dev);
1483
1484         if (!netif_running(dev))
1485                 return -EAGAIN;
1486         if (p->link_config.autoneg != AUTONEG_ENABLE)
1487                 return -EINVAL;
1488         p->phy.ops->autoneg_restart(&p->phy);
1489         return 0;
1490 }
1491
1492 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1493 {
1494         struct port_info *pi = netdev_priv(dev);
1495         struct adapter *adapter = pi->adapter;
1496         int i;
1497
1498         if (data == 0)
1499                 data = 2;
1500
1501         for (i = 0; i < data * 2; i++) {
1502                 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1503                                  (i & 1) ? F_GPIO0_OUT_VAL : 0);
1504                 if (msleep_interruptible(500))
1505                         break;
1506         }
1507         t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1508                          F_GPIO0_OUT_VAL);
1509         return 0;
1510 }
1511
1512 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1513 {
1514         struct port_info *p = netdev_priv(dev);
1515
1516         cmd->supported = p->link_config.supported;
1517         cmd->advertising = p->link_config.advertising;
1518
1519         if (netif_carrier_ok(dev)) {
1520                 cmd->speed = p->link_config.speed;
1521                 cmd->duplex = p->link_config.duplex;
1522         } else {
1523                 cmd->speed = -1;
1524                 cmd->duplex = -1;
1525         }
1526
1527         cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1528         cmd->phy_address = p->phy.addr;
1529         cmd->transceiver = XCVR_EXTERNAL;
1530         cmd->autoneg = p->link_config.autoneg;
1531         cmd->maxtxpkt = 0;
1532         cmd->maxrxpkt = 0;
1533         return 0;
1534 }
1535
1536 static int speed_duplex_to_caps(int speed, int duplex)
1537 {
1538         int cap = 0;
1539
1540         switch (speed) {
1541         case SPEED_10:
1542                 if (duplex == DUPLEX_FULL)
1543                         cap = SUPPORTED_10baseT_Full;
1544                 else
1545                         cap = SUPPORTED_10baseT_Half;
1546                 break;
1547         case SPEED_100:
1548                 if (duplex == DUPLEX_FULL)
1549                         cap = SUPPORTED_100baseT_Full;
1550                 else
1551                         cap = SUPPORTED_100baseT_Half;
1552                 break;
1553         case SPEED_1000:
1554                 if (duplex == DUPLEX_FULL)
1555                         cap = SUPPORTED_1000baseT_Full;
1556                 else
1557                         cap = SUPPORTED_1000baseT_Half;
1558                 break;
1559         case SPEED_10000:
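                /* there is no half-duplex mode at 10Gb/s, so cap stays 0 otherwise */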
1560                 if (duplex == DUPLEX_FULL)
1561                         cap = SUPPORTED_10000baseT_Full;
1562         }
1563         return cap;
1564 }
1565
1566 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1567                       ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1568                       ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1569                       ADVERTISED_10000baseT_Full)
1570
1571 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1572 {
1573         int cap;
1574         struct port_info *p = netdev_priv(dev);
1575         struct link_config *lc = &p->link_config;
1576
1577         if (!(lc->supported & SUPPORTED_Autoneg)) {
1578                 /*
1579                  * PHY offers a single speed/duplex.  See if that's what's
1580                  * being requested.
1581                  */
1582                 if (cmd->autoneg == AUTONEG_DISABLE) {
1583                         cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1584                         if (lc->supported & cap)
1585                                 return 0;
1586                 }
1587                 return -EINVAL;
1588         }
1589
1590         if (cmd->autoneg == AUTONEG_DISABLE) {
1591                 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1592
1593                 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1594                         return -EINVAL; /* unsupported, or forced 1Gb/s (needs autoneg) */
1595                 lc->requested_speed = cmd->speed;
1596                 lc->requested_duplex = cmd->duplex;
1597                 lc->advertising = 0;
1598         } else {
1599                 cmd->advertising &= ADVERTISED_MASK;
1600                 cmd->advertising &= lc->supported;
1601                 if (!cmd->advertising)
1602                         return -EINVAL;
1603                 lc->requested_speed = SPEED_INVALID;
1604                 lc->requested_duplex = DUPLEX_INVALID;
1605                 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1606         }
1607         lc->autoneg = cmd->autoneg;
1608         if (netif_running(dev))
1609                 t3_link_start(&p->phy, &p->mac, lc);
1610         return 0;
1611 }
1612
1613 static void get_pauseparam(struct net_device *dev,
1614                            struct ethtool_pauseparam *epause)
1615 {
1616         struct port_info *p = netdev_priv(dev);
1617
1618         epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1619         epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1620         epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1621 }
1622
1623 static int set_pauseparam(struct net_device *dev,
1624                           struct ethtool_pauseparam *epause)
1625 {
1626         struct port_info *p = netdev_priv(dev);
1627         struct link_config *lc = &p->link_config;
1628
1629         if (epause->autoneg == AUTONEG_DISABLE)
1630                 lc->requested_fc = 0;
1631         else if (lc->supported & SUPPORTED_Autoneg)
1632                 lc->requested_fc = PAUSE_AUTONEG;
1633         else
1634                 return -EINVAL;
1635
1636         if (epause->rx_pause)
1637                 lc->requested_fc |= PAUSE_RX;
1638         if (epause->tx_pause)
1639                 lc->requested_fc |= PAUSE_TX;
1640         if (lc->autoneg == AUTONEG_ENABLE) {
1641                 if (netif_running(dev))
1642                         t3_link_start(&p->phy, &p->mac, lc);
1643         } else {
1644                 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1645                 if (netif_running(dev))
1646                         t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1647         }
1648         return 0;
1649 }
1650
1651 static u32 get_rx_csum(struct net_device *dev)
1652 {
1653         struct port_info *p = netdev_priv(dev);
1654
1655         return p->rx_csum_offload;
1656 }
1657
1658 static int set_rx_csum(struct net_device *dev, u32 data)
1659 {
1660         struct port_info *p = netdev_priv(dev);
1661
1662         p->rx_csum_offload = data;
1663         if (!data) {            /* LRO depends on Rx checksums, so disable it too */
1664                 int i;
1665
1666                 for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
1667                         set_qset_lro(dev, i, 0);
1668         }
1669         return 0;
1670 }
1671
1672 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1673 {
1674         struct port_info *pi = netdev_priv(dev);
1675         struct adapter *adapter = pi->adapter;
1676         const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1677
1678         e->rx_max_pending = MAX_RX_BUFFERS;
1679         e->rx_mini_max_pending = 0;
1680         e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1681         e->tx_max_pending = MAX_TXQ_ENTRIES;
1682
1683         e->rx_pending = q->fl_size;
1684         e->rx_mini_pending = q->rspq_size;
1685         e->rx_jumbo_pending = q->jumbo_size;
1686         e->tx_pending = q->txq_size[0];
1687 }
1688
1689 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1690 {
1691         struct port_info *pi = netdev_priv(dev);
1692         struct adapter *adapter = pi->adapter;
1693         struct qset_params *q;
1694         int i;
1695
1696         if (e->rx_pending > MAX_RX_BUFFERS ||
1697             e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1698             e->tx_pending > MAX_TXQ_ENTRIES ||
1699             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1700             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1701             e->rx_pending < MIN_FL_ENTRIES ||
1702             e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1703             e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1704                 return -EINVAL;
1705
1706         if (adapter->flags & FULL_INIT_DONE)
1707                 return -EBUSY;
1708
1709         q = &adapter->params.sge.qset[pi->first_qset];
1710         for (i = 0; i < pi->nqsets; ++i, ++q) {
1711                 q->rspq_size = e->rx_mini_pending;
1712                 q->fl_size = e->rx_pending;
1713                 q->jumbo_size = e->rx_jumbo_pending;
1714                 q->txq_size[0] = e->tx_pending;
1715                 q->txq_size[1] = e->tx_pending;
1716                 q->txq_size[2] = e->tx_pending;
1717         }
1718         return 0;
1719 }
1720
1721 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1722 {
1723         struct port_info *pi = netdev_priv(dev);
1724         struct adapter *adapter = pi->adapter;
1725         struct qset_params *qsp = &adapter->params.sge.qset[0];
1726         struct sge_qset *qs = &adapter->sge.qs[0];
1727
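        /* Note: ethtool coalescing reads and writes queue set 0 only */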
1728         if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1729                 return -EINVAL;
1730
1731         qsp->coalesce_usecs = c->rx_coalesce_usecs;
1732         t3_update_qset_coalesce(qs, qsp);
1733         return 0;
1734 }
1735
1736 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1737 {
1738         struct port_info *pi = netdev_priv(dev);
1739         struct adapter *adapter = pi->adapter;
1740         struct qset_params *q = adapter->params.sge.qset;
1741
1742         c->rx_coalesce_usecs = q->coalesce_usecs;
1743         return 0;
1744 }
1745
1746 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1747                       u8 *data)
1748 {
1749         struct port_info *pi = netdev_priv(dev);
1750         struct adapter *adapter = pi->adapter;
1751         int i, err = 0;
1752
1753         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1754         if (!buf)
1755                 return -ENOMEM;
1756
1757         e->magic = EEPROM_MAGIC;
1758         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1759                 err = t3_seeprom_read(adapter, i, (__le32 *)&buf[i]);
1760
1761         if (!err)
1762                 memcpy(data, buf + e->offset, e->len);
1763         kfree(buf);
1764         return err;
1765 }
1766
1767 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1768                       u8 *data)
1769 {
1770         struct port_info *pi = netdev_priv(dev);
1771         struct adapter *adapter = pi->adapter;
1772         u32 aligned_offset, aligned_len;
1773         __le32 *p;
1774         u8 *buf;
1775         int err;
1776
1777         if (eeprom->magic != EEPROM_MAGIC)
1778                 return -EINVAL;
1779
1780         aligned_offset = eeprom->offset & ~3;
1781         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1782
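        /*
         * The SEEPROM is written in 4-byte words.  For a request that is
         * not word-aligned, read back the first and last words overlapping
         * the range, merge in the caller's bytes, then write whole words.
         */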
1783         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1784                 buf = kmalloc(aligned_len, GFP_KERNEL);
1785                 if (!buf)
1786                         return -ENOMEM;
1787                 err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
1788                 if (!err && aligned_len > 4)
1789                         err = t3_seeprom_read(adapter,
1790                                               aligned_offset + aligned_len - 4,
1791                                               (__le32 *)&buf[aligned_len - 4]);
1792                 if (err)
1793                         goto out;
1794                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1795         } else
1796                 buf = data;
1797
1798         err = t3_seeprom_wp(adapter, 0);
1799         if (err)
1800                 goto out;
1801
1802         for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1803                 err = t3_seeprom_write(adapter, aligned_offset, *p);
1804                 aligned_offset += 4;
1805         }
1806
1807         if (!err)
1808                 err = t3_seeprom_wp(adapter, 1);
1809 out:
1810         if (buf != data)
1811                 kfree(buf);
1812         return err;
1813 }
1814
1815 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1816 {
1817         wol->supported = 0;
1818         wol->wolopts = 0;
1819         memset(&wol->sopass, 0, sizeof(wol->sopass));
1820 }
1821
1822 static int cxgb3_set_flags(struct net_device *dev, u32 data)
1823 {
1824         struct port_info *pi = netdev_priv(dev);
1825         int i;
1826
1827         if (data & ETH_FLAG_LRO) {
1828                 if (!pi->rx_csum_offload)
1829                         return -EINVAL; /* LRO requires Rx checksum offload */
1830
1831                 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1832                         set_qset_lro(dev, i, 1);
1833
1834         } else
1835                 for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
1836                         set_qset_lro(dev, i, 0);
1837
1838         return 0;
1839 }
1840
1841 static const struct ethtool_ops cxgb_ethtool_ops = {
1842         .get_settings = get_settings,
1843         .set_settings = set_settings,
1844         .get_drvinfo = get_drvinfo,
1845         .get_msglevel = get_msglevel,
1846         .set_msglevel = set_msglevel,
1847         .get_ringparam = get_sge_param,
1848         .set_ringparam = set_sge_param,
1849         .get_coalesce = get_coalesce,
1850         .set_coalesce = set_coalesce,
1851         .get_eeprom_len = get_eeprom_len,
1852         .get_eeprom = get_eeprom,
1853         .set_eeprom = set_eeprom,
1854         .get_pauseparam = get_pauseparam,
1855         .set_pauseparam = set_pauseparam,
1856         .get_rx_csum = get_rx_csum,
1857         .set_rx_csum = set_rx_csum,
1858         .set_tx_csum = ethtool_op_set_tx_csum,
1859         .set_sg = ethtool_op_set_sg,
1860         .get_link = ethtool_op_get_link,
1861         .get_strings = get_strings,
1862         .phys_id = cxgb3_phys_id,
1863         .nway_reset = restart_autoneg,
1864         .get_sset_count = get_sset_count,
1865         .get_ethtool_stats = get_stats,
1866         .get_regs_len = get_regs_len,
1867         .get_regs = get_regs,
1868         .get_wol = get_wol,
1869         .set_tso = ethtool_op_set_tso,
1870         .get_flags = ethtool_op_get_flags,
1871         .set_flags = cxgb3_set_flags,
1872 };
1873
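/*
 * Negative values serve as a "leave unchanged" sentinel in the extension
 * ioctl parameters below, so they are accepted as in range.
 */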
1874 static int in_range(int val, int lo, int hi)
1875 {
1876         return val < 0 || (val <= hi && val >= lo);
1877 }
1878
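/*
 * Illustrative userspace usage (a sketch, not part of the driver): the
 * extension commands below arrive via the SIOCCHIOCTL ioctl, with
 * ifr_data pointing at a command structure whose first word selects
 * the command, e.g.:
 *
 *	struct ch_reg edata = { .cmd = CHELSIO_GET_QSET_NUM };
 *	struct ifreq ifr;
 *
 *	strcpy(ifr.ifr_name, "eth0");
 *	ifr.ifr_data = (void *)&edata;
 *	ioctl(sockfd, SIOCCHIOCTL, &ifr);  (sockfd is any open socket)
 */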
1879 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1880 {
1881         struct port_info *pi = netdev_priv(dev);
1882         struct adapter *adapter = pi->adapter;
1883         u32 cmd;
1884         int ret;
1885
1886         if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1887                 return -EFAULT;
1888
1889         switch (cmd) {
1890         case CHELSIO_SET_QSET_PARAMS:{
1891                 int i;
1892                 struct qset_params *q;
1893                 struct ch_qset_params t;
1894                 int q1 = pi->first_qset;
1895                 int nqsets = pi->nqsets;
1896
1897                 if (!capable(CAP_NET_ADMIN))
1898                         return -EPERM;
1899                 if (copy_from_user(&t, useraddr, sizeof(t)))
1900                         return -EFAULT;
1901                 if (t.qset_idx >= SGE_QSETS)
1902                         return -EINVAL;
1903                 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1904                         !in_range(t.cong_thres, 0, 255) ||
1905                         !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1906                                 MAX_TXQ_ENTRIES) ||
1907                         !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1908                                 MAX_TXQ_ENTRIES) ||
1909                         !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1910                                 MAX_CTRL_TXQ_ENTRIES) ||
1911                         !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1912                                 MAX_RX_BUFFERS) ||
1913                         !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1914                                 MAX_RX_JUMBO_BUFFERS) ||
1915                         !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1916                                 MAX_RSPQ_ENTRIES))
1917                         return -EINVAL;
1918
1919                 if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
1920                         for_each_port(adapter, i) {
1921                                 pi = adap2pinfo(adapter, i);
1922                                 if (t.qset_idx >= pi->first_qset &&
1923                                     t.qset_idx < pi->first_qset + pi->nqsets &&
1924                                     !pi->rx_csum_offload)
1925                                         return -EINVAL;
1926                         }
1927
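                /*
                 * Once fully initialized, ring sizes, polling mode and the
                 * congestion threshold are frozen; only the interrupt
                 * latency and LRO settings further below may still change.
                 */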
1928                 if ((adapter->flags & FULL_INIT_DONE) &&
1929                         (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1930                         t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1931                         t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1932                         t.polling >= 0 || t.cong_thres >= 0))
1933                         return -EBUSY;
1934
1935                 /* Allow setting of any available qset when offload enabled */
1936                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
1937                         q1 = 0;
1938                         for_each_port(adapter, i) {
1939                                 pi = adap2pinfo(adapter, i);
1940                                 nqsets += pi->first_qset + pi->nqsets;  /* XXX: '+=' looks like it overcounts; cf. '=' in the GET case */
1941                         }
1942                 }
1943
1944                 if (t.qset_idx < q1)
1945                         return -EINVAL;
1946                 if (t.qset_idx > q1 + nqsets - 1)
1947                         return -EINVAL;
1948
1949                 q = &adapter->params.sge.qset[t.qset_idx];
1950
1951                 if (t.rspq_size >= 0)
1952                         q->rspq_size = t.rspq_size;
1953                 if (t.fl_size[0] >= 0)
1954                         q->fl_size = t.fl_size[0];
1955                 if (t.fl_size[1] >= 0)
1956                         q->jumbo_size = t.fl_size[1];
1957                 if (t.txq_size[0] >= 0)
1958                         q->txq_size[0] = t.txq_size[0];
1959                 if (t.txq_size[1] >= 0)
1960                         q->txq_size[1] = t.txq_size[1];
1961                 if (t.txq_size[2] >= 0)
1962                         q->txq_size[2] = t.txq_size[2];
1963                 if (t.cong_thres >= 0)
1964                         q->cong_thres = t.cong_thres;
1965                 if (t.intr_lat >= 0) {
1966                         struct sge_qset *qs =
1967                                 &adapter->sge.qs[t.qset_idx];
1968
1969                         q->coalesce_usecs = t.intr_lat;
1970                         t3_update_qset_coalesce(qs, q);
1971                 }
1972                 if (t.polling >= 0) {
1973                         if (adapter->flags & USING_MSIX)
1974                                 q->polling = t.polling;
1975                         else {
1976                                 /* No polling with INTx for T3A */
1977                                 if (adapter->params.rev == 0 &&
1978                                         !(adapter->flags & USING_MSI))
1979                                         t.polling = 0;
1980
1981                                 for (i = 0; i < SGE_QSETS; i++) {
1982                                         q = &adapter->params.sge.
1983                                                 qset[i];
1984                                         q->polling = t.polling;
1985                                 }
1986                         }
1987                 }
1988                 if (t.lro >= 0)
1989                         set_qset_lro(dev, t.qset_idx, t.lro);
1990
1991                 break;
1992         }
1993         case CHELSIO_GET_QSET_PARAMS:{
1994                 struct qset_params *q;
1995                 struct ch_qset_params t;
1996                 int q1 = pi->first_qset;
1997                 int nqsets = pi->nqsets;
1998                 int i;
1999
2000                 if (copy_from_user(&t, useraddr, sizeof(t)))
2001                         return -EFAULT;
2002
2003                 /* Display qsets for all ports when offload enabled */
2004                 if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
2005                         q1 = 0;
2006                         for_each_port(adapter, i) {
2007                                 pi = adap2pinfo(adapter, i);
2008                                 nqsets = pi->first_qset + pi->nqsets;
2009                         }
2010                 }
2011
2012                 if (t.qset_idx >= nqsets)
2013                         return -EINVAL;
2014
2015                 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2016                 t.rspq_size = q->rspq_size;
2017                 t.txq_size[0] = q->txq_size[0];
2018                 t.txq_size[1] = q->txq_size[1];
2019                 t.txq_size[2] = q->txq_size[2];
2020                 t.fl_size[0] = q->fl_size;
2021                 t.fl_size[1] = q->jumbo_size;
2022                 t.polling = q->polling;
2023                 t.lro = q->lro;
2024                 t.intr_lat = q->coalesce_usecs;
2025                 t.cong_thres = q->cong_thres;
2026                 t.qnum = q1;
2027
2028                 if (adapter->flags & USING_MSIX)
2029                         t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
2030                 else
2031                         t.vector = adapter->pdev->irq;
2032
2033                 if (copy_to_user(useraddr, &t, sizeof(t)))
2034                         return -EFAULT;
2035                 break;
2036         }
2037         case CHELSIO_SET_QSET_NUM:{
2038                 struct ch_reg edata;
2039                 unsigned int i, first_qset = 0, other_qsets = 0;
2040
2041                 if (!capable(CAP_NET_ADMIN))
2042                         return -EPERM;
2043                 if (adapter->flags & FULL_INIT_DONE)
2044                         return -EBUSY;
2045                 if (copy_from_user(&edata, useraddr, sizeof(edata)))
2046                         return -EFAULT;
2047                 if (edata.val < 1 ||
2048                         (edata.val > 1 && !(adapter->flags & USING_MSIX)))
2049                         return -EINVAL; /* more than one qset requires MSI-X */
2050
2051                 for_each_port(adapter, i)
2052                         if (adapter->port[i] && adapter->port[i] != dev)
2053                                 other_qsets += adap2pinfo(adapter, i)->nqsets;
2054
2055                 if (edata.val + other_qsets > SGE_QSETS)
2056                         return -EINVAL;
2057
2058                 pi->nqsets = edata.val;
2059
2060                 for_each_port(adapter, i)
2061                         if (adapter->port[i]) {
2062                                 pi = adap2pinfo(adapter, i);
2063                                 pi->first_qset = first_qset;
2064                                 first_qset += pi->nqsets;
2065                         }
2066                 break;
2067         }
2068         case CHELSIO_GET_QSET_NUM:{
2069                 struct ch_reg edata;
2070
2071                 edata.cmd = CHELSIO_GET_QSET_NUM;
2072                 edata.val = pi->nqsets;
2073                 if (copy_to_user(useraddr, &edata, sizeof(edata)))
2074                         return -EFAULT;
2075                 break;
2076         }
2077         case CHELSIO_LOAD_FW:{
2078                 u8 *fw_data;
2079                 struct ch_mem_range t;
2080
2081                 if (!capable(CAP_SYS_RAWIO))
2082                         return -EPERM;
2083                 if (copy_from_user(&t, useraddr, sizeof(t)))
2084                         return -EFAULT;
2085                 /* XXX: t.len is not sanity-checked before this allocation */
2086                 fw_data = kmalloc(t.len, GFP_KERNEL);
2087                 if (!fw_data)
2088                         return -ENOMEM;
2089
2090                 if (copy_from_user(fw_data, useraddr + sizeof(t),
2091                                    t.len)) {
2092                         kfree(fw_data);
2093                         return -EFAULT;
2094                 }
2095
2096                 ret = t3_load_fw(adapter, fw_data, t.len);
2097                 kfree(fw_data);
2098                 if (ret)
2099                         return ret;
2100                 break;
2101         }
2102         case CHELSIO_SETMTUTAB:{
2103                 struct ch_mtus m;
2104                 int i;
2105
2106                 if (!is_offload(adapter))
2107                         return -EOPNOTSUPP;
2108                 if (!capable(CAP_NET_ADMIN))
2109                         return -EPERM;
2110                 if (offload_running(adapter))
2111                         return -EBUSY;
2112                 if (copy_from_user(&m, useraddr, sizeof(m)))
2113                         return -EFAULT;
2114                 if (m.nmtus != NMTUS)
2115                         return -EINVAL;
2116                 if (m.mtus[0] < 81)     /* accommodate SACK */
2117                         return -EINVAL;
2118
2119                 /* MTUs must be in ascending order */
2120                 for (i = 1; i < NMTUS; ++i)
2121                         if (m.mtus[i] < m.mtus[i - 1])
2122                                 return -EINVAL;
2123
2124                 memcpy(adapter->params.mtus, m.mtus,
2125                         sizeof(adapter->params.mtus));
2126                 break;
2127         }
2128         case CHELSIO_GET_PM:{
2129                 struct tp_params *p = &adapter->params.tp;
2130                 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
2131
2132                 if (!is_offload(adapter))
2133                         return -EOPNOTSUPP;
2134                 m.tx_pg_sz = p->tx_pg_size;
2135                 m.tx_num_pg = p->tx_num_pgs;
2136                 m.rx_pg_sz = p->rx_pg_size;
2137                 m.rx_num_pg = p->rx_num_pgs;
2138                 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
2139                 if (copy_to_user(useraddr, &m, sizeof(m)))
2140                         return -EFAULT;
2141                 break;
2142         }
2143         case CHELSIO_SET_PM:{
2144                 struct ch_pm m;
2145                 struct tp_params *p = &adapter->params.tp;
2146
2147                 if (!is_offload(adapter))
2148                         return -EOPNOTSUPP;
2149                 if (!capable(CAP_NET_ADMIN))
2150                         return -EPERM;
2151                 if (adapter->flags & FULL_INIT_DONE)
2152                         return -EBUSY;
2153                 if (copy_from_user(&m, useraddr, sizeof(m)))
2154                         return -EFAULT;
2155                 if (!is_power_of_2(m.rx_pg_sz) ||
2156                         !is_power_of_2(m.tx_pg_sz))
2157                         return -EINVAL; /* not power of 2 */
2158                 if (!(m.rx_pg_sz & 0x14000))
2159                         return -EINVAL; /* not 16KB or 64KB */
2160                 if (!(m.tx_pg_sz & 0x1554000))
2161                         return -EINVAL; /* not a power of 4 from 16KB to 16MB */
2162                 if (m.tx_num_pg == -1)
2163                         m.tx_num_pg = p->tx_num_pgs;
2164                 if (m.rx_num_pg == -1)
2165                         m.rx_num_pg = p->rx_num_pgs;
2166                 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
2167                         return -EINVAL;
2168                 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
2169                         m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
2170                         return -EINVAL;
2171                 p->rx_pg_size = m.rx_pg_sz;
2172                 p->tx_pg_size = m.tx_pg_sz;
2173                 p->rx_num_pgs = m.rx_num_pg;
2174                 p->tx_num_pgs = m.tx_num_pg;
2175                 break;
2176         }
2177         case CHELSIO_GET_MEM:{
2178                 struct ch_mem_range t;
2179                 struct mc7 *mem;
2180                 u64 buf[32];
2181
2182                 if (!is_offload(adapter))
2183                         return -EOPNOTSUPP;
2184                 if (!(adapter->flags & FULL_INIT_DONE))
2185                         return -EIO;    /* need the memory controllers */
2186                 if (copy_from_user(&t, useraddr, sizeof(t)))
2187                         return -EFAULT;
2188                 if ((t.addr & 7) || (t.len & 7))
2189                         return -EINVAL;
2190                 if (t.mem_id == MEM_CM)
2191                         mem = &adapter->cm;
2192                 else if (t.mem_id == MEM_PMRX)
2193                         mem = &adapter->pmrx;
2194                 else if (t.mem_id == MEM_PMTX)
2195                         mem = &adapter->pmtx;
2196                 else
2197                         return -EINVAL;
2198
2199                 /*
2200                  * Version scheme:
2201                  * bits 0..9: chip version
2202                  * bits 10..15: chip revision
2203                  */
2204                 t.version = 3 | (adapter->params.rev << 10);
2205                 if (copy_to_user(useraddr, &t, sizeof(t)))
2206                         return -EFAULT;
2207
2208                 /*
2209                  * Read 256 bytes at a time as len can be large and we don't
2210                  * want to use huge intermediate buffers.
2211                  */
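                /* buf[] holds 32 u64s = 256 bytes; t3_mc7_bd_read() works in 8-byte words */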
2212                 useraddr += sizeof(t);  /* advance to start of buffer */
2213                 while (t.len) {
2214                         unsigned int chunk =
2215                                 min_t(unsigned int, t.len, sizeof(buf));
2216
2217                         ret = t3_mc7_bd_read(mem, t.addr / 8,
2218                                              chunk / 8,
2219                                              buf);
2220                         if (ret)
2221                                 return ret;
2222                         if (copy_to_user(useraddr, buf, chunk))
2223                                 return -EFAULT;
2224                         useraddr += chunk;
2225                         t.addr += chunk;
2226                         t.len -= chunk;
2227                 }
2228                 break;
2229         }
2230         case CHELSIO_SET_TRACE_FILTER:{
2231                 struct ch_trace t;
2232                 const struct trace_params *tp;
2233
2234                 if (!capable(CAP_NET_ADMIN))
2235                         return -EPERM;
2236                 if (!offload_running(adapter))
2237                         return -EAGAIN;
2238                 if (copy_from_user(&t, useraddr, sizeof(t)))
2239                         return -EFAULT;
2240
2241                 tp = (const struct trace_params *)&t.sip;       /* ch_trace's fields from sip on mirror struct trace_params */
2242                 if (t.config_tx)
2243                         t3_config_trace_filter(adapter, tp, 0,
2244                                                 t.invert_match,
2245                                                 t.trace_tx);
2246                 if (t.config_rx)
2247                         t3_config_trace_filter(adapter, tp, 1,
2248                                                 t.invert_match,
2249                                                 t.trace_rx);
2250                 break;
2251         }
2252         default:
2253                 return -EOPNOTSUPP;
2254         }
2255         return 0;
2256 }
2257
2258 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2259 {
2260         struct mii_ioctl_data *data = if_mii(req);
2261         struct port_info *pi = netdev_priv(dev);
2262         struct adapter *adapter = pi->adapter;
2263         int ret, mmd;
2264
2265         switch (cmd) {
2266         case SIOCGMIIPHY:
2267                 data->phy_id = pi->phy.addr;
2268                 /* FALLTHRU */
2269         case SIOCGMIIREG:{
2270                 u32 val;
2271                 struct cphy *phy = &pi->phy;
2272
2273                 if (!phy->mdio_read)
2274                         return -EOPNOTSUPP;
2275                 if (is_10G(adapter)) {
2276                         mmd = data->phy_id >> 8;
2277                         if (!mmd)
2278                                 mmd = MDIO_DEV_PCS;
2279                         else if (mmd > MDIO_DEV_VEND2)
2280                                 return -EINVAL;
2281
2282                         ret = phy->mdio_read(adapter,
2283                                              data->phy_id & 0x1f,
2284                                              mmd, data->reg_num, &val);
2285                 } else
2286                         ret = phy->mdio_read(adapter,
2287                                              data->phy_id & 0x1f,
2288                                              0, data->reg_num & 0x1f,
2289                                              &val);
2290                 if (!ret)
2291                         data->val_out = val;
2292                 break;
2293         }
2294         case SIOCSMIIREG:{
2295                 struct cphy *phy = &pi->phy;
2296
2297                 if (!capable(CAP_NET_ADMIN))
2298                         return -EPERM;
2299                 if (!phy->mdio_write)
2300                         return -EOPNOTSUPP;
2301                 if (is_10G(adapter)) {
2302                         mmd = data->phy_id >> 8;
2303                         if (!mmd)
2304                                 mmd = MDIO_DEV_PCS;
2305                         else if (mmd > MDIO_DEV_VEND2)
2306                                 return -EINVAL;
2307
2308                         ret = phy->mdio_write(adapter,
2309                                               data->phy_id & 0x1f,
2310                                               mmd,
2311                                               data->reg_num,
2312                                               data->val_in);
2313                 } else
2314                         ret = phy->mdio_write(adapter,
2315                                               data->phy_id & 0x1f,
2316                                               0,
2317                                               data->reg_num & 0x1f,
2318                                               data->val_in);
2319                 break;
2320         }
2321         case SIOCCHIOCTL:
2322                 return cxgb_extension_ioctl(dev, req->ifr_data);
2323         default:
2324                 return -EOPNOTSUPP;
2325         }
2326         return ret;
2327 }
2328
2329 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2330 {
2331         struct port_info *pi = netdev_priv(dev);
2332         struct adapter *adapter = pi->adapter;
2333         int ret;
2334
2335         if (new_mtu < 81)       /* accommodate SACK */
2336                 return -EINVAL;
2337         if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2338                 return ret;
2339         dev->mtu = new_mtu;
2340         init_port_mtus(adapter);
2341         if (adapter->params.rev == 0 && offload_running(adapter))
2342                 t3_load_mtus(adapter, adapter->params.mtus,
2343                              adapter->params.a_wnd, adapter->params.b_wnd,
2344                              adapter->port[0]->mtu);
2345         return 0;
2346 }
2347
2348 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2349 {
2350         struct port_info *pi = netdev_priv(dev);
2351         struct adapter *adapter = pi->adapter;
2352         struct sockaddr *addr = p;
2353
2354         if (!is_valid_ether_addr(addr->sa_data))
2355                 return -EINVAL;
2356
2357         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2358         t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2359         if (offload_running(adapter))
2360                 write_smt_entry(adapter, pi->port_id);
2361         return 0;
2362 }
2363
2364 /**
2365  * t3_synchronize_rx - wait for current Rx processing on a port to complete
2366  * @adap: the adapter
2367  * @p: the port
2368  *
2369  * Ensures that current Rx processing on any of the queues associated with
2370  * the given port completes before returning.  We do this by acquiring and
2371  * releasing the locks of the response queues associated with the port.
2372  */
2373 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2374 {
2375         int i;
2376
2377         for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
2378                 struct sge_rspq *q = &adap->sge.qs[i].rspq;
2379
2380                 spin_lock_irq(&q->lock);
2381                 spin_unlock_irq(&q->lock);
2382         }
2383 }
2384
2385 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2386 {
2387         struct port_info *pi = netdev_priv(dev);
2388         struct adapter *adapter = pi->adapter;
2389
2390         pi->vlan_grp = grp;
2391         if (adapter->params.rev > 0)
2392                 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2393         else {
2394                 /* single control for all ports */
2395                 unsigned int i, have_vlans = 0;
2396                 for_each_port(adapter, i)
2397                     have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2398
2399                 t3_set_vlan_accel(adapter, 1, have_vlans);
2400         }
2401         t3_synchronize_rx(adapter, pi);
2402 }
2403
2404 #ifdef CONFIG_NET_POLL_CONTROLLER
2405 static void cxgb_netpoll(struct net_device *dev)
2406 {
2407         struct port_info *pi = netdev_priv(dev);
2408         struct adapter *adapter = pi->adapter;
2409         int qidx;
2410
2411         for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2412                 struct sge_qset *qs = &adapter->sge.qs[qidx];
2413                 void *source;
2414
2415                 if (adapter->flags & USING_MSIX)
2416                         source = qs;    /* per-qset MSI-X handlers take the qset */
2417                 else
2418                         source = adapter;       /* INTx/MSI handlers take the adapter */
2419
2420                 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2421         }
2422 }
2423 #endif
2424
2425 /*
2426  * Periodic accumulation of MAC statistics.
2427  */
2428 static void mac_stats_update(struct adapter *adapter)
2429 {
2430         int i;
2431
2432         for_each_port(adapter, i) {
2433                 struct net_device *dev = adapter->port[i];
2434                 struct port_info *p = netdev_priv(dev);
2435
2436                 if (netif_running(dev)) {
2437                         spin_lock(&adapter->stats_lock);
2438                         t3_mac_update_stats(&p->mac);
2439                         spin_unlock(&adapter->stats_lock);
2440                 }
2441         }
2442 }
2443
2444 static void check_link_status(struct adapter *adapter)
2445 {
2446         int i;
2447
2448         for_each_port(adapter, i) {
2449                 struct net_device *dev = adapter->port[i];
2450                 struct port_info *p = netdev_priv(dev);
2451
2452                 if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev))
2453                         t3_link_changed(adapter, i);
2454         }
2455 }
2456
2457 static void check_t3b2_mac(struct adapter *adapter)
2458 {
2459         int i;
2460
2461         if (!rtnl_trylock())    /* synchronize with ifdown */
2462                 return;
2463
2464         for_each_port(adapter, i) {
2465                 struct net_device *dev = adapter->port[i];
2466                 struct port_info *p = netdev_priv(dev);
2467                 int status;
2468
2469                 if (!netif_running(dev))
2470                         continue;
2471
2472                 status = 0;
2473                 if (netif_carrier_ok(dev))      /* netif_running() already checked above */
2474                         status = t3b2_mac_watchdog_task(&p->mac);
2475                 if (status == 1)
2476                         p->mac.stats.num_toggled++;
2477                 else if (status == 2) {
2478                         struct cmac *mac = &p->mac;
2479
2480                         t3_mac_set_mtu(mac, dev->mtu);
2481                         t3_mac_set_address(mac, 0, dev->dev_addr);
2482                         cxgb_set_rxmode(dev);
2483                         t3_link_start(&p->phy, mac, &p->link_config);
2484                         t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2485                         t3_port_intr_enable(adapter, p->port_id);
2486                         p->mac.stats.num_resets++;
2487                 }
2488         }
2489         rtnl_unlock();
2490 }
2491
2492
2493 static void t3_adap_check_task(struct work_struct *work)
2494 {
2495         struct adapter *adapter = container_of(work, struct adapter,
2496                                                adap_check_task.work);
2497         const struct adapter_params *p = &adapter->params;
2498
2499         adapter->check_task_cnt++;
2500
2501         /* Check link status for PHYs without interrupts */
2502         if (p->linkpoll_period)
2503                 check_link_status(adapter);
2504
2505         /* Accumulate MAC stats if needed; linkpoll_period is in 0.1s units */
2506         if (!p->linkpoll_period ||
2507             (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2508             p->stats_update_period) {
2509                 mac_stats_update(adapter);
2510                 adapter->check_task_cnt = 0;
2511         }
2512
2513         if (p->rev == T3_REV_B2)
2514                 check_t3b2_mac(adapter);
2515
2516         /* Schedule the next check update if any port is active. */
2517         spin_lock_irq(&adapter->work_lock);
2518         if (adapter->open_device_map & PORT_MASK)
2519                 schedule_chk_task(adapter);
2520         spin_unlock_irq(&adapter->work_lock);
2521 }
2522
2523 /*
2524  * Processes external (PHY) interrupts in process context.
2525  */
2526 static void ext_intr_task(struct work_struct *work)
2527 {
2528         struct adapter *adapter = container_of(work, struct adapter,
2529                                                ext_intr_handler_task);
2530
2531         t3_phy_intr_handler(adapter);
2532
2533         /* Now reenable external interrupts */
2534         spin_lock_irq(&adapter->work_lock);
2535         if (adapter->slow_intr_mask) {
2536                 adapter->slow_intr_mask |= F_T3DBG;
2537                 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2538                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2539                              adapter->slow_intr_mask);
2540         }
2541         spin_unlock_irq(&adapter->work_lock);
2542 }
2543
2544 /*
2545  * Interrupt-context handler for external (PHY) interrupts.
2546  */
2547 void t3_os_ext_intr_handler(struct adapter *adapter)
2548 {
2549         /*
2550          * Schedule a task to handle external interrupts as they may be slow
2551          * and we use a mutex to protect MDIO registers.  We disable PHY
2552          * interrupts in the meantime and let the task reenable them when
2553          * it's done.
2554          */
2555         spin_lock(&adapter->work_lock);
2556         if (adapter->slow_intr_mask) {
2557                 adapter->slow_intr_mask &= ~F_T3DBG;
2558                 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2559                              adapter->slow_intr_mask);
2560                 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2561         }
2562         spin_unlock(&adapter->work_lock);
2563 }
2564
2565 static int t3_adapter_error(struct adapter *adapter, int reset)
2566 {
2567         int i, ret = 0;
2568
2569         /* Stop all ports */
2570         for_each_port(adapter, i) {
2571                 struct net_device *netdev = adapter->port[i];
2572
2573                 if (netif_running(netdev))
2574                         cxgb_close(netdev);
2575         }
2576
2577         if (is_offload(adapter) &&
2578             test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
2579                 offload_close(&adapter->tdev);
2580
2581         /* Stop SGE timers */
2582         t3_stop_sge_timers(adapter);
2583
2584         adapter->flags &= ~FULL_INIT_DONE;
2585
2586         if (reset)
2587                 ret = t3_reset_adapter(adapter);
2588
2589         pci_disable_device(adapter->pdev);
2590
2591         return ret;
2592 }
2593
2594 static int t3_reenable_adapter(struct adapter *adapter)
2595 {
2596         if (pci_enable_device(adapter->pdev)) {
2597                 dev_err(&adapter->pdev->dev,
2598                         "Cannot re-enable PCI device after reset.\n");
2599                 goto err;
2600         }
2601         pci_set_master(adapter->pdev);
2602         pci_restore_state(adapter->pdev);
2603
2604         /* Free sge resources */
2605         t3_free_sge_resources(adapter);
2606
2607         if (t3_replay_prep_adapter(adapter))
2608                 goto err;
2609
2610         return 0;
2611 err:
2612         return -1;
2613 }
2614
2615 static void t3_resume_ports(struct adapter *adapter)
2616 {
2617         int i;
2618
2619         /* Restart the ports */
2620         for_each_port(adapter, i) {
2621                 struct net_device *netdev = adapter->port[i];
2622
2623                 if (netif_running(netdev)) {
2624                         if (cxgb_open(netdev)) {
2625                                 dev_err(&adapter->pdev->dev,
2626                                         "can't bring device back up"
2627                                         " after reset\n");
2628                                 continue;
2629                         }
2630                 }
2631         }
2632 }
2633
2634 /*
2635  * Processes a fatal error:
2636  * bring the ports down, reset the chip, bring the ports back up.
2637  */
2638 static void fatal_error_task(struct work_struct *work)
2639 {
2640         struct adapter *adapter = container_of(work, struct adapter,
2641                                                fatal_error_handler_task);
2642         int err = 0;
2643
2644         rtnl_lock();
2645         err = t3_adapter_error(adapter, 1);
2646         if (!err)
2647                 err = t3_reenable_adapter(adapter);
2648         if (!err)
2649                 t3_resume_ports(adapter);
2650
2651         CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
2652         rtnl_unlock();
2653 }
2654
2655 void t3_fatal_err(struct adapter *adapter)
2656 {
2657         unsigned int fw_status[4];
2658
2659         if (adapter->flags & FULL_INIT_DONE) {
2660                 t3_sge_stop(adapter);
2661                 t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
2662                 t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
2663                 t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
2664                 t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
2665
2666                 spin_lock(&adapter->work_lock);
2667                 t3_intr_disable(adapter);
2668                 queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
2669                 spin_unlock(&adapter->work_lock);
2670         }
2671         CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2672         if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2673                 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2674                          fw_status[0], fw_status[1],
2675                          fw_status[2], fw_status[3]);
2676
2677 }
2678
2679 /**
2680  * t3_io_error_detected - called when PCI error is detected
2681  * @pdev: Pointer to PCI device
2682  * @state: The current pci connection state
2683  *
2684  * This function is called after a PCI bus error affecting
2685  * this device has been detected.
2686  */
2687 static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
2688                                              pci_channel_state_t state)
2689 {
2690         struct adapter *adapter = pci_get_drvdata(pdev);
2691         int ret;
2692
2693         ret = t3_adapter_error(adapter, 0);     /* result ignored; a slot reset is requested regardless */
2694
2695         /* Request a slot reset. */
2696         return PCI_ERS_RESULT_NEED_RESET;
2697 }
2698
2699 /**
2700  * t3_io_slot_reset - called after the pci bus has been reset.
2701  * @pdev: Pointer to PCI device
2702  *
2703  * Restart the card from scratch, as if from a cold boot.
2704  */
2705 static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
2706 {
2707         struct adapter *adapter = pci_get_drvdata(pdev);
2708
2709         if (!t3_reenable_adapter(adapter))
2710                 return PCI_ERS_RESULT_RECOVERED;
2711
2712         return PCI_ERS_RESULT_DISCONNECT;
2713 }
2714
2715 /**
2716  * t3_io_resume - called when traffic can start flowing again.
2717  * @pdev: Pointer to PCI device
2718  *
2719  * This callback is called when the error recovery driver tells us that
2720  * it's OK to resume normal operation.
2721  */
2722 static void t3_io_resume(struct pci_dev *pdev)
2723 {
2724         struct adapter *adapter = pci_get_drvdata(pdev);
2725
2726         t3_resume_ports(adapter);
2727 }
2728
2729 static struct pci_error_handlers t3_err_handler = {
2730         .error_detected = t3_io_error_detected,
2731         .slot_reset = t3_io_slot_reset,
2732         .resume = t3_io_resume,
2733 };
2734
2735 /*
2736  * Set the number of qsets based on the number of CPUs and the number of ports,
2737  * not to exceed the number of available qsets, assuming there are enough qsets
2738  * per port in HW.
2739  */
2740 static void set_nqsets(struct adapter *adap)
2741 {
2742         int i, j = 0;
2743         int num_cpus = num_online_cpus();
2744         int hwports = adap->params.nports;
2745         int nqsets = SGE_QSETS;
2746
2747         if (adap->params.rev > 0) {
2748                 if (hwports == 2 &&
2749                     (hwports * nqsets > SGE_QSETS ||
2750                      num_cpus >= nqsets / hwports))
2751                         nqsets /= hwports;
2752                 if (nqsets > num_cpus)
2753                         nqsets = num_cpus;
2754                 if (nqsets < 1 || hwports == 4)
2755                         nqsets = 1;
2756         } else
2757                 nqsets = 1;
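        /*
         * Example: a rev > 0 two-port adapter gets SGE_QSETS / 2 queue sets
         * per port, further capped by the number of online CPUs.
         */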
2758
2759         for_each_port(adap, i) {
2760                 struct port_info *pi = adap2pinfo(adap, i);
2761
2762                 pi->first_qset = j;
2763                 pi->nqsets = nqsets;
2764                 j = pi->first_qset + nqsets;
2765
2766                 dev_info(&adap->pdev->dev,
2767                          "Port %d using %d queue sets.\n", i, nqsets);
2768         }
2769 }
2770
2771 static int __devinit cxgb_enable_msix(struct adapter *adap)
2772 {
2773         struct msix_entry entries[SGE_QSETS + 1];      /* one per qset plus one for async events */
2774         int i, err;
2775
2776         for (i = 0; i < ARRAY_SIZE(entries); ++i)
2777                 entries[i].entry = i;
2778
2779         err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2780         if (!err) {
2781                 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2782                         adap->msix_info[i].vec = entries[i].vector;
2783         } else if (err > 0)
2784                 dev_info(&adap->pdev->dev,
2785                        "only %d MSI-X vectors left, not using MSI-X\n", err);
2786         return err;
2787 }
2788
2789 static void __devinit print_port_info(struct adapter *adap,
2790                                       const struct adapter_info *ai)
2791 {
2792         static const char *pci_variant[] = {
2793                 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2794         };
2795
2796         int i;
2797         char buf[80];
2798
2799         if (is_pcie(adap))
2800                 snprintf(buf, sizeof(buf), "%s x%d",
2801                          pci_variant[adap->params.pci.variant],
2802                          adap->params.pci.width);
2803         else
2804                 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2805                          pci_variant[adap->params.pci.variant],
2806                          adap->params.pci.speed, adap->params.pci.width);
2807
2808         for_each_port(adap, i) {
2809                 struct net_device *dev = adap->port[i];
2810                 const struct port_info *pi = netdev_priv(dev);
2811
2812                 if (!test_bit(i, &adap->registered_device_map))
2813                         continue;
2814                 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2815                        dev->name, ai->desc, pi->phy.desc,
2816                        is_offload(adap) ? "R" : "", adap->params.rev, buf,
2817                        (adap->flags & USING_MSIX) ? " MSI-X" :
2818                        (adap->flags & USING_MSI) ? " MSI" : "");
2819                 if (adap->name == dev->name && adap->params.vpd.mclk)
2820                         printk(KERN_INFO
2821                                "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
2822                                adap->name, t3_mc7_size(&adap->cm) >> 20,
2823                                t3_mc7_size(&adap->pmtx) >> 20,
2824                                t3_mc7_size(&adap->pmrx) >> 20,
2825                                adap->params.vpd.sn);
2826         }
2827 }
2828
2829 static int __devinit init_one(struct pci_dev *pdev,
2830                               const struct pci_device_id *ent)
2831 {
2832         static int version_printed;
2833
2834         int i, err, pci_using_dac = 0;
2835         unsigned long mmio_start, mmio_len;
2836         const struct adapter_info *ai;
2837         struct adapter *adapter = NULL;
2838         struct port_info *pi;
2839
2840         if (!version_printed) {
2841                 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2842                 ++version_printed;
2843         }
2844
2845         if (!cxgb3_wq) {
2846                 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2847                 if (!cxgb3_wq) {
2848                         printk(KERN_ERR DRV_NAME
2849                                ": cannot initialize work queue\n");
2850                         return -ENOMEM;
2851                 }
2852         }
2853
2854         err = pci_request_regions(pdev, DRV_NAME);
2855         if (err) {
2856                 /* Just info, some other driver may have claimed the device. */
2857                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2858                 return err;
2859         }
2860
2861         err = pci_enable_device(pdev);
2862         if (err) {
2863                 dev_err(&pdev->dev, "cannot enable PCI device\n");
2864                 goto out_release_regions;
2865         }
2866
2867         if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2868                 pci_using_dac = 1;
2869                 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2870                 if (err) {
2871                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2872                                "coherent allocations\n");
2873                         goto out_disable_device;
2874                 }
2875         } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2876                 dev_err(&pdev->dev, "no usable DMA configuration\n");
2877                 goto out_disable_device;
2878         }
2879
2880         pci_set_master(pdev);
2881         pci_save_state(pdev);
2882
2883         mmio_start = pci_resource_start(pdev, 0);
2884         mmio_len = pci_resource_len(pdev, 0);
2885         ai = t3_get_adapter_info(ent->driver_data);
2886
2887         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2888         if (!adapter) {
2889                 err = -ENOMEM;
2890                 goto out_disable_device;
2891         }
2892
2893         adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2894         if (!adapter->regs) {
2895                 dev_err(&pdev->dev, "cannot map device registers\n");
2896                 err = -ENOMEM;
2897                 goto out_free_adapter;
2898         }
2899
2900         adapter->pdev = pdev;
2901         adapter->name = pci_name(pdev);
2902         adapter->msg_enable = dflt_msg_enable;
2903         adapter->mmio_len = mmio_len;
2904
2905         mutex_init(&adapter->mdio_lock);
2906         spin_lock_init(&adapter->work_lock);
2907         spin_lock_init(&adapter->stats_lock);
2908
2909         INIT_LIST_HEAD(&adapter->adapter_list);
2910         INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2911         INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
2912         INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2913
        for (i = 0; i < ai->nports; ++i) {
                struct net_device *netdev;

                netdev = alloc_etherdev(sizeof(struct port_info));
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                adapter->port[i] = netdev;
                pi = netdev_priv(netdev);
                pi->adapter = adapter;
                pi->rx_csum_offload = 1;
                pi->port_id = i;
                netif_carrier_off(netdev);
                netdev->irq = pdev->irq;
                netdev->mem_start = mmio_start;
                netdev->mem_end = mmio_start + mmio_len - 1;
                netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
                netdev->features |= NETIF_F_LLTX;
                if (pci_using_dac)
                        netdev->features |= NETIF_F_HIGHDMA;

                netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
                netdev->vlan_rx_register = vlan_rx_register;

                netdev->open = cxgb_open;
                netdev->stop = cxgb_close;
                netdev->hard_start_xmit = t3_eth_xmit;
                netdev->get_stats = cxgb_get_stats;
                netdev->set_multicast_list = cxgb_set_rxmode;
                netdev->do_ioctl = cxgb_ioctl;
                netdev->change_mtu = cxgb_change_mtu;
                netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
                netdev->poll_controller = cxgb_netpoll;
#endif

                SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
        }

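        /*
         * t3_prep_adapter() resets the chip and initializes the common
         * hardware and software state shared by all ports.
         */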
        pci_set_drvdata(pdev, adapter);
        if (t3_prep_adapter(adapter, ai, 1) < 0) {
                err = -ENODEV;
                goto out_free_dev;
        }

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for_each_port(adapter, i) {
                err = register_netdev(adapter->port[i]);
                if (err)
                        dev_warn(&pdev->dev,
                                 "cannot register net device %s, skipping\n",
                                 adapter->port[i]->name);
                else {
                        /*
                         * Change the name we use for messages to the name of
                         * the first successfully registered interface.
                         */
                        if (!adapter->registered_device_map)
                                adapter->name = adapter->port[i]->name;

                        __set_bit(i, &adapter->registered_device_map);
                }
        }
        if (!adapter->registered_device_map) {
                dev_err(&pdev->dev, "could not register any net devices\n");
                goto out_free_dev;
        }


        /* The driver is ready; reflect that on the LEDs. */
        t3_led_ready(adapter);

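        /*
         * If the adapter supports offload, reserve the offload bit in the
         * device map and attach the adapter to the offload framework.
         */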
        if (is_offload(adapter)) {
                __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
                cxgb3_adapter_ofld(adapter);
        }

        /*
         * See what interrupts we'll be using: msi > 1 asks for MSI-X,
         * msi > 0 falls back to MSI, and otherwise we stay on legacy INTx.
         */
        if (msi > 1 && cxgb_enable_msix(adapter) == 0)
                adapter->flags |= USING_MSIX;
        else if (msi > 0 && pci_enable_msi(pdev) == 0)
                adapter->flags |= USING_MSI;

        set_nqsets(adapter);

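        /*
         * Note that the return value is not checked: failure to create
         * the sysfs attribute group is not treated as fatal here.
         */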
        err = sysfs_create_group(&adapter->port[0]->dev.kobj,
                                 &cxgb3_attr_group);

        print_port_info(adapter, ai);
        return 0;

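        /*
         * Error unwinding: each label below releases the resources
         * acquired after the previous label, in reverse order.
         */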
out_free_dev:
        iounmap(adapter->regs);
        for (i = ai->nports - 1; i >= 0; --i)
                if (adapter->port[i])
                        free_netdev(adapter->port[i]);

out_free_adapter:
        kfree(adapter);

out_disable_device:
        pci_disable_device(pdev);
out_release_regions:
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}

static void __devexit remove_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

        if (adapter) {
                int i;

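                /*
                 * Tear down in roughly the reverse order of probe: quiesce
                 * the SGE, remove sysfs entries, detach offload, unregister
                 * the ports, then free interrupts, memory, and PCI state.
                 */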
                t3_sge_stop(adapter);
                sysfs_remove_group(&adapter->port[0]->dev.kobj,
                                   &cxgb3_attr_group);

                if (is_offload(adapter)) {
                        cxgb3_adapter_unofld(adapter);
                        if (test_bit(OFFLOAD_DEVMAP_BIT,
                                     &adapter->open_device_map))
                                offload_close(&adapter->tdev);
                }

                for_each_port(adapter, i)
                        if (test_bit(i, &adapter->registered_device_map))
                                unregister_netdev(adapter->port[i]);

                t3_stop_sge_timers(adapter);
                t3_free_sge_resources(adapter);
                cxgb_disable_msi(adapter);

                for_each_port(adapter, i)
                        if (adapter->port[i])
                                free_netdev(adapter->port[i]);

                iounmap(adapter->regs);
                kfree(adapter);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
}

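/*
 * PCI driver glue.  The err_handler hooks let the PCI core drive
 * error recovery for this device.
 */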
static struct pci_driver driver = {
        .name = DRV_NAME,
        .id_table = cxgb3_pci_tbl,
        .probe = init_one,
        .remove = __devexit_p(remove_one),
        .err_handler = &t3_err_handler,
};

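/*
 * Module entry point: set up the offload bookkeeping, then register
 * the PCI driver with the core.
 */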
static int __init cxgb3_init_module(void)
{
        cxgb3_offload_init();

        return pci_register_driver(&driver);
}

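/*
 * The cxgb3_wq workqueue is created lazily at first device probe, so
 * destroy it on unload only if it was ever created.
 */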
static void __exit cxgb3_cleanup_module(void)
{
        pci_unregister_driver(&driver);
        if (cxgb3_wq)
                destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);