drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/if.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <asm/uaccess.h>
64
65 #include "cxgb4.h"
66 #include "t4_regs.h"
67 #include "t4_msg.h"
68 #include "t4fw_api.h"
69 #include "l2t.h"
70
71 #define DRV_VERSION "1.3.0-ko"
72 #define DRV_DESC "Chelsio T4 Network Driver"
73
74 /*
75  * Max interrupt hold-off timer value in us.  Queues fall back to this value
76  * under extreme memory pressure so it's largish to give the system time to
77  * recover.
78  */
79 #define MAX_SGE_TIMERVAL 200U
80
81 enum {
82         /*
83          * Physical Function provisioning constants.
84          */
85         PFRES_NVI = 4,                  /* # of Virtual Interfaces */
86         PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
87         PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr
88                                          */
89         PFRES_NEQ = 256,                /* # of egress queues */
90         PFRES_NIQ = 0,                  /* # of ingress queues */
91         PFRES_TC = 0,                   /* PCI-E traffic class */
92         PFRES_NEXACTF = 128,            /* # of exact MPS filters */
93
94         PFRES_R_CAPS = FW_CMD_CAP_PF,
95         PFRES_WX_CAPS = FW_CMD_CAP_PF,
96
97 #ifdef CONFIG_PCI_IOV
98         /*
99          * Virtual Function provisioning constants.  We need two extra Ingress
100          * Queues with Interrupt capability to serve as the VF's Firmware
101          * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
102  *          neither will have Free Lists associated with them.  For each
103          * Ethernet/Control Egress Queue and for each Free List, we need an
104          * Egress Context.
105          */
106         VFRES_NPORTS = 1,               /* # of "ports" per VF */
107         VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */
108
109         VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
110         VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
111         VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
112         VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
113         VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
114         VFRES_TC = 0,                   /* PCI-E traffic class */
115         VFRES_NEXACTF = 16,             /* # of exact MPS filters */
116
117         VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
118         VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
119 #endif
120 };
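/*
 * Illustrative compile-time sanity check (not part of the original driver;
 * the helper name is ours) for the VF egress-queue accounting described
 * above: each of the VFRES_NQSETS "Queue Sets" needs one Ethernet/Control
 * Egress Queue plus one Free List, and each of those consumes an Egress
 * Context, hence VFRES_NEQ == VFRES_NQSETS * 2.  The two extra ingress
 * queues (Firmware Event Queue, Forwarded Interrupt Queue) account for
 * the "+2" in VFRES_NIQFLINT.
 */
#ifdef CONFIG_PCI_IOV
static inline void vfres_accounting_check(void)
{
	BUILD_BUG_ON(VFRES_NEQ != VFRES_NETHCTRL + VFRES_NQSETS);
	BUILD_BUG_ON(VFRES_NIQFLINT != VFRES_NQSETS + 2);
}
#endif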
121
122 /*
123  * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
124  * static and likely not to be useful in the long run.  We really need to
125  * implement some form of persistent configuration which the firmware
126  * controls.
127  */
128 static unsigned int pfvfres_pmask(struct adapter *adapter,
129                                   unsigned int pf, unsigned int vf)
130 {
131         unsigned int portn, portvec;
132
133         /*
134          * Give PF's access to all of the ports.
135          */
136         if (vf == 0)
137                 return FW_PFVF_CMD_PMASK_MASK;
138
139         /*
140          * For VFs, we'll assign them access to the ports based purely on the
141          * PF.  We assign active ports in order, wrapping around if there are
142          * fewer active ports than PFs: e.g. active port[pf % nports].
143          * Unfortunately the adapter's port_info structs haven't been
144          * initialized yet so we have to compute this.
145          */
146         if (adapter->params.nports == 0)
147                 return 0;
148
149         portn = pf % adapter->params.nports;
150         portvec = adapter->params.portvec;
151         for (;;) {
152                 /*
153                  * Isolate the lowest set bit in the port vector.  If we're at
154                  * the port number that we want, return that as the pmask;
155                  * otherwise, mask that bit out of the port vector and
156                  * decrement our port number ...
157                  */
158                 unsigned int pmask = portvec ^ (portvec & (portvec-1));
159                 if (portn == 0)
160                         return pmask;
161                 portn--;
162                 portvec &= ~pmask;
163         }
164         /*NOTREACHED*/
165 }
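/*
 * A minimal illustration (ours, not in the original source) of the bit
 * trick used in the loop above: v & (v - 1) clears the lowest set bit, so
 * XORing the result with v isolates that bit -- the same result as the
 * two's-complement idiom v & -v.  E.g. for v = 0x6: v & (v - 1) = 0x4,
 * and 0x6 ^ 0x4 = 0x2, the lowest set bit.
 */
static inline unsigned int isolate_lowest_set_bit(unsigned int v)
{
	return v ^ (v & (v - 1));	/* equivalently: v & -v */
}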
166
167 enum {
168         MAX_TXQ_ENTRIES      = 16384,
169         MAX_CTRL_TXQ_ENTRIES = 1024,
170         MAX_RSPQ_ENTRIES     = 16384,
171         MAX_RX_BUFFERS       = 16384,
172         MIN_TXQ_ENTRIES      = 32,
173         MIN_CTRL_TXQ_ENTRIES = 32,
174         MIN_RSPQ_ENTRIES     = 128,
175         MIN_FL_ENTRIES       = 16
176 };
177
178 /* Host shadow copy of ingress filter entry.  This is in host native format
179  * and doesn't match the ordering or bit order, etc. of the hardware or the
180  * firmware command.  The use of bit-field structure elements is purely to
181  * remind ourselves of the field size limitations and save memory in the case
182  * where the filter table is large.
183  */
184 struct filter_entry {
185         /* Administrative fields for filter.
186          */
187         u32 valid:1;            /* filter allocated and valid */
188         u32 locked:1;           /* filter is administratively locked */
189
190         u32 pending:1;          /* filter action is pending firmware reply */
191         u32 smtidx:8;           /* Source MAC Table index for smac */
192         struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */
193
194         /* The filter itself.  Most of this is a straight copy of information
195          * provided by the extended ioctl().  Some fields are translated to
196          * internal forms -- for instance the Ingress Queue ID passed in from
197          * the ioctl() is translated into the Absolute Ingress Queue ID.
198          */
199         struct ch_filter_specification fs;
200 };
201
202 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
203                          NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
204                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
205
206 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
207
208 static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
209         CH_DEVICE(0xa000, 0),  /* PE10K */
210         CH_DEVICE(0x4001, -1),
211         CH_DEVICE(0x4002, -1),
212         CH_DEVICE(0x4003, -1),
213         CH_DEVICE(0x4004, -1),
214         CH_DEVICE(0x4005, -1),
215         CH_DEVICE(0x4006, -1),
216         CH_DEVICE(0x4007, -1),
217         CH_DEVICE(0x4008, -1),
218         CH_DEVICE(0x4009, -1),
219         CH_DEVICE(0x400a, -1),
220         CH_DEVICE(0x4401, 4),
221         CH_DEVICE(0x4402, 4),
222         CH_DEVICE(0x4403, 4),
223         CH_DEVICE(0x4404, 4),
224         CH_DEVICE(0x4405, 4),
225         CH_DEVICE(0x4406, 4),
226         CH_DEVICE(0x4407, 4),
227         CH_DEVICE(0x4408, 4),
228         CH_DEVICE(0x4409, 4),
229         CH_DEVICE(0x440a, 4),
230         CH_DEVICE(0x440d, 4),
231         CH_DEVICE(0x440e, 4),
232         { 0, }
233 };
234
235 #define FW_FNAME "cxgb4/t4fw.bin"
236 #define FW_CFNAME "cxgb4/t4-config.txt"
237
238 MODULE_DESCRIPTION(DRV_DESC);
239 MODULE_AUTHOR("Chelsio Communications");
240 MODULE_LICENSE("Dual BSD/GPL");
241 MODULE_VERSION(DRV_VERSION);
242 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
243 MODULE_FIRMWARE(FW_FNAME);
244
245 /*
246  * Normally we're willing to become the firmware's Master PF but will be happy
247  * if another PF has already become the Master and initialized the adapter.
248  * Setting "force_init" will cause this driver to forcibly establish itself as
249  * the Master PF and initialize the adapter.
250  */
251 static uint force_init;
252
253 module_param(force_init, uint, 0644);
254 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
255
256 /*
257  * Normally if the firmware we connect to has Configuration File support, we
258  * use that and only fall back to the old Driver-based initialization if the
259  * Configuration File fails for some reason.  If force_old_init is set, then
260  * we'll always use the old Driver-based initialization sequence.
261  */
262 static uint force_old_init;
263
264 module_param(force_old_init, uint, 0644);
265 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
266
267 static int dflt_msg_enable = DFLT_MSG_ENABLE;
268
269 module_param(dflt_msg_enable, int, 0644);
270 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
271
272 /*
273  * The driver uses the best interrupt scheme available on a platform in the
274  * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
275  * of these schemes the driver may consider as follows:
276  *
277  * msi = 2: choose from among all three options
278  * msi = 1: only consider MSI and INTx interrupts
279  * msi = 0: force INTx interrupts
280  */
281 static int msi = 2;
282
283 module_param(msi, int, 0644);
284 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
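/*
 * For example (illustrative), loading the driver with "modprobe cxgb4
 * msi=1" restricts it to MSI/INTx, while the default msi=2 lets it try
 * MSI-X first and fall back to MSI and then INTx.
 */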
285
286 /*
287  * Queue interrupt hold-off timer values.  Queues default to the first of these
288  * upon creation.
289  */
290 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
291
292 module_param_array(intr_holdoff, uint, NULL, 0644);
293 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
294                  "0..4 in microseconds");
295
296 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
297
298 module_param_array(intr_cnt, uint, NULL, 0644);
299 MODULE_PARM_DESC(intr_cnt,
300                  "thresholds 1..3 for queue interrupt packet counters");
301
302 /*
303  * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
304  * offset by 2 bytes in order to have the IP headers line up on 4-byte
305  * boundaries.  This is a requirement for many architectures which will throw
306  * a machine check fault if an attempt is made to access one of the 4-byte IP
307  * header fields on a non-4-byte boundary.  And it's a major performance issue
308  * even on some architectures which allow it like some implementations of the
309  * x86 ISA.  However, some architectures don't mind this and for some very
310  * edge-case performance sensitive applications (like forwarding large volumes
311  * of small packets), setting this DMA offset to 0 will decrease the number of
312  * PCI-E Bus transfers enough to measurably affect performance.
313  */
314 static int rx_dma_offset = 2;
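/*
 * Worked example (ours, not from the original source) of the alignment
 * arithmetic behind the default: an Ethernet header is ETH_HLEN == 14
 * bytes, so a frame DMAed to a 4-byte-aligned buffer puts the IP header
 * at offset 14 (14 % 4 == 2, misaligned).  Starting the DMA
 * rx_dma_offset == 2 bytes into the buffer moves the IP header to offset
 * 16, a 4-byte boundary:
 *
 *	(2 + ETH_HLEN) % 4 == (2 + 14) % 4 == 0
 */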
315
316 static bool vf_acls;
317
318 #ifdef CONFIG_PCI_IOV
319 module_param(vf_acls, bool, 0644);
320 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
321
322 static unsigned int num_vf[4];
323
324 module_param_array(num_vf, uint, NULL, 0644);
325 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
326 #endif
327
328 /*
329  * The filter TCAM has a fixed portion and a variable portion.  The fixed
330  * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
331  * ports.  The variable portion is 36 bits which can include things like Exact
332  * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
333  * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
334  * far exceed the 36-bit budget for this "compressed" header portion of the
335  * filter.  Thus, we have a scarce resource which must be carefully managed.
336  *
337  * By default we set this up to mostly match the set of filter matching
338  * capabilities of T3 but with accommodations for some of T4's more
339  * interesting features:
340  *
341  *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
342  *     [Inner] VLAN (17), Port (3), FCoE (1) }
343  */
344 enum {
345         TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
346         TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
347         TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
348 };
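/*
 * Bit-budget check for the default map above (ours, not from the original
 * source): IP Fragment (1) + MPS Match Type (3) + IP Protocol (8) +
 * [Inner] VLAN (17) + Port (3) + FCoE (1) = 33 bits, which fits in the
 * 36-bit compressed filter tuple with 3 bits to spare.
 */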
349
350 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
351
352 module_param(tp_vlan_pri_map, uint, 0644);
353 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
354
355 static struct dentry *cxgb4_debugfs_root;
356
357 static LIST_HEAD(adapter_list);
358 static DEFINE_MUTEX(uld_mutex);
359 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
360 static const char *uld_str[] = { "RDMA", "iSCSI" };
361
362 static void link_report(struct net_device *dev)
363 {
364         if (!netif_carrier_ok(dev))
365                 netdev_info(dev, "link down\n");
366         else {
367                 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
368
369                 const char *s = "10Mbps";
370                 const struct port_info *p = netdev_priv(dev);
371
372                 switch (p->link_cfg.speed) {
373                 case SPEED_10000:
374                         s = "10Gbps";
375                         break;
376                 case SPEED_1000:
377                         s = "1000Mbps";
378                         break;
379                 case SPEED_100:
380                         s = "100Mbps";
381                         break;
382                 }
383
384                 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
385                             fc[p->link_cfg.fc]);
386         }
387 }
388
389 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
390 {
391         struct net_device *dev = adapter->port[port_id];
392
393         /* Skip changes from disabled ports. */
394         if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
395                 if (link_stat)
396                         netif_carrier_on(dev);
397                 else
398                         netif_carrier_off(dev);
399
400                 link_report(dev);
401         }
402 }
403
404 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
405 {
406         static const char *mod_str[] = {
407                 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
408         };
409
410         const struct net_device *dev = adap->port[port_id];
411         const struct port_info *pi = netdev_priv(dev);
412
413         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
414                 netdev_info(dev, "port module unplugged\n");
415         else if (pi->mod_type < ARRAY_SIZE(mod_str))
416                 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
417 }
418
419 /*
420  * Configure the exact and hash address filters to handle a port's multicast
421  * and secondary unicast MAC addresses.
422  */
423 static int set_addr_filters(const struct net_device *dev, bool sleep)
424 {
425         u64 mhash = 0;
426         u64 uhash = 0;
427         bool free = true;
428         u16 filt_idx[7];
429         const u8 *addr[7];
430         int ret, naddr = 0;
431         const struct netdev_hw_addr *ha;
432         int uc_cnt = netdev_uc_count(dev);
433         int mc_cnt = netdev_mc_count(dev);
434         const struct port_info *pi = netdev_priv(dev);
435         unsigned int mb = pi->adapter->fn;
436
437         /* first do the secondary unicast addresses */
438         netdev_for_each_uc_addr(ha, dev) {
439                 addr[naddr++] = ha->addr;
440                 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
441                         ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
442                                         naddr, addr, filt_idx, &uhash, sleep);
443                         if (ret < 0)
444                                 return ret;
445
446                         free = false;
447                         naddr = 0;
448                 }
449         }
450
451         /* next set up the multicast addresses */
452         netdev_for_each_mc_addr(ha, dev) {
453                 addr[naddr++] = ha->addr;
454                 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
455                         ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
456                                         naddr, addr, filt_idx, &mhash, sleep);
457                         if (ret < 0)
458                                 return ret;
459
460                         free = false;
461                         naddr = 0;
462                 }
463         }
464
465         return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
466                                 uhash | mhash, sleep);
467 }
468
469 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
470 module_param(dbfifo_int_thresh, int, 0644);
471 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
472
473 /*
474  * usecs to sleep while draining the dbfifo
475  */
476 static int dbfifo_drain_delay = 1000;
477 module_param(dbfifo_drain_delay, int, 0644);
478 MODULE_PARM_DESC(dbfifo_drain_delay,
479                  "usecs to sleep while draining the dbfifo");
480
481 /*
482  * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
483  * If @mtu is -1 it is left unchanged.
484  */
485 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
486 {
487         int ret;
488         struct port_info *pi = netdev_priv(dev);
489
490         ret = set_addr_filters(dev, sleep_ok);
491         if (ret == 0)
492                 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
493                                     (dev->flags & IFF_PROMISC) ? 1 : 0,
494                                     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
495                                     sleep_ok);
496         return ret;
497 }
498
499 static struct workqueue_struct *workq;
500
501 /**
502  *      link_start - enable a port
503  *      @dev: the port to enable
504  *
505  *      Performs the MAC and PHY actions needed to enable a port.
506  */
507 static int link_start(struct net_device *dev)
508 {
509         int ret;
510         struct port_info *pi = netdev_priv(dev);
511         unsigned int mb = pi->adapter->fn;
512
513         /*
514          * We do not set address filters and promiscuity here, the stack does
515          * that step explicitly.
516          */
517         ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
518                             !!(dev->features & NETIF_F_HW_VLAN_RX), true);
519         if (ret == 0) {
520                 ret = t4_change_mac(pi->adapter, mb, pi->viid,
521                                     pi->xact_addr_filt, dev->dev_addr, true,
522                                     true);
523                 if (ret >= 0) {
524                         pi->xact_addr_filt = ret;
525                         ret = 0;
526                 }
527         }
528         if (ret == 0)
529                 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
530                                     &pi->link_cfg);
531         if (ret == 0)
532                 ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
533         return ret;
534 }
535
536 /* Clear a filter and release any of its resources that we own.  This also
537  * clears the filter's "pending" status.
538  */
539 static void clear_filter(struct adapter *adap, struct filter_entry *f)
540 {
541         /* If the new or old filter has loopback rewriting rules then we'll
542          * need to free any existing Layer Two Table (L2T) entries of the old
543          * filter rule.  The firmware will handle freeing up any Source MAC
544          * Table (SMT) entries used for rewriting Source MAC Addresses in
545          * loopback rules.
546          */
547         if (f->l2t)
548                 cxgb4_l2t_release(f->l2t);
549
550         /* The zeroing of the filter rule below clears the filter valid,
551          * pending, locked flags, l2t pointer, etc. so it's all we need for
552          * this operation.
553          */
554         memset(f, 0, sizeof(*f));
555 }
556
557 /* Handle a filter write/deletion reply.
558  */
559 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
560 {
561         unsigned int idx = GET_TID(rpl);
562         unsigned int nidx = idx - adap->tids.ftid_base;
563         unsigned int ret;
564         struct filter_entry *f;
565
566         if (idx >= adap->tids.ftid_base && nidx <
567            (adap->tids.nftids + adap->tids.nsftids)) {
568                 idx = nidx;
569                 ret = GET_TCB_COOKIE(rpl->cookie);
570                 f = &adap->tids.ftid_tab[idx];
571
572                 if (ret == FW_FILTER_WR_FLT_DELETED) {
573                         /* Clear the filter when we get confirmation from the
574                          * hardware that the filter has been deleted.
575                          */
576                         clear_filter(adap, f);
577                 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
578                         dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
579                                 idx);
580                         clear_filter(adap, f);
581                 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
582                         f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
583                         f->pending = 0;  /* asynchronous setup completed */
584                         f->valid = 1;
585                 } else {
586                         /* Something went wrong.  Issue a warning about the
587                          * problem and clear everything out.
588                          */
589                         dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
590                                 idx, ret);
591                         clear_filter(adap, f);
592                 }
593         }
594 }
595
596 /* Response queue handler for the FW event queue.
597  */
598 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
599                           const struct pkt_gl *gl)
600 {
601         u8 opcode = ((const struct rss_header *)rsp)->opcode;
602
603         rsp++;                                          /* skip RSS header */
604         if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
605                 const struct cpl_sge_egr_update *p = (void *)rsp;
606                 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
607                 struct sge_txq *txq;
608
609                 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
610                 txq->restarts++;
611                 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
612                         struct sge_eth_txq *eq;
613
614                         eq = container_of(txq, struct sge_eth_txq, q);
615                         netif_tx_wake_queue(eq->txq);
616                 } else {
617                         struct sge_ofld_txq *oq;
618
619                         oq = container_of(txq, struct sge_ofld_txq, q);
620                         tasklet_schedule(&oq->qresume_tsk);
621                 }
622         } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
623                 const struct cpl_fw6_msg *p = (void *)rsp;
624
625                 if (p->type == 0)
626                         t4_handle_fw_rpl(q->adap, p->data);
627         } else if (opcode == CPL_L2T_WRITE_RPL) {
628                 const struct cpl_l2t_write_rpl *p = (void *)rsp;
629
630                 do_l2t_write_rpl(q->adap, p);
631         } else if (opcode == CPL_SET_TCB_RPL) {
632                 const struct cpl_set_tcb_rpl *p = (void *)rsp;
633
634                 filter_rpl(q->adap, p);
635         } else
636                 dev_err(q->adap->pdev_dev,
637                         "unexpected CPL %#x on FW event queue\n", opcode);
638         return 0;
639 }
640
641 /**
642  *      uldrx_handler - response queue handler for ULD queues
643  *      @q: the response queue that received the packet
644  *      @rsp: the response queue descriptor holding the offload message
645  *      @gl: the gather list of packet fragments
646  *
647  *      Deliver an ingress offload packet to a ULD.  All processing is done by
648  *      the ULD, we just maintain statistics.
649  */
650 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
651                          const struct pkt_gl *gl)
652 {
653         struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
654
655         if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
656                 rxq->stats.nomem++;
657                 return -1;
658         }
659         if (gl == NULL)
660                 rxq->stats.imm++;
661         else if (gl == CXGB4_MSG_AN)
662                 rxq->stats.an++;
663         else
664                 rxq->stats.pkts++;
665         return 0;
666 }
667
668 static void disable_msi(struct adapter *adapter)
669 {
670         if (adapter->flags & USING_MSIX) {
671                 pci_disable_msix(adapter->pdev);
672                 adapter->flags &= ~USING_MSIX;
673         } else if (adapter->flags & USING_MSI) {
674                 pci_disable_msi(adapter->pdev);
675                 adapter->flags &= ~USING_MSI;
676         }
677 }
678
679 /*
680  * Interrupt handler for non-data events used with MSI-X.
681  */
682 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
683 {
684         struct adapter *adap = cookie;
685
686         u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
687         if (v & PFSW) {
688                 adap->swintr = 1;
689                 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
690         }
691         t4_slow_intr_handler(adap);
692         return IRQ_HANDLED;
693 }
694
695 /*
696  * Name the MSI-X interrupts.
697  */
698 static void name_msix_vecs(struct adapter *adap)
699 {
700         int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
701
702         /* non-data interrupts */
703         snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
704
705         /* FW events */
706         snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
707                  adap->port[0]->name);
708
709         /* Ethernet queues */
710         for_each_port(adap, j) {
711                 struct net_device *d = adap->port[j];
712                 const struct port_info *pi = netdev_priv(d);
713
714                 for (i = 0; i < pi->nqsets; i++, msi_idx++)
715                         snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
716                                  d->name, i);
717         }
718
719         /* offload queues */
720         for_each_ofldrxq(&adap->sge, i)
721                 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
722                          adap->port[0]->name, i);
723
724         for_each_rdmarxq(&adap->sge, i)
725                 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
726                          adap->port[0]->name, i);
727 }
728
729 static int request_msix_queue_irqs(struct adapter *adap)
730 {
731         struct sge *s = &adap->sge;
732         int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
733
734         err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
735                           adap->msix_info[1].desc, &s->fw_evtq);
736         if (err)
737                 return err;
738
739         for_each_ethrxq(s, ethqidx) {
740                 err = request_irq(adap->msix_info[msi_index].vec,
741                                   t4_sge_intr_msix, 0,
742                                   adap->msix_info[msi_index].desc,
743                                   &s->ethrxq[ethqidx].rspq);
744                 if (err)
745                         goto unwind;
746                 msi_index++;
747         }
748         for_each_ofldrxq(s, ofldqidx) {
749                 err = request_irq(adap->msix_info[msi_index].vec,
750                                   t4_sge_intr_msix, 0,
751                                   adap->msix_info[msi_index].desc,
752                                   &s->ofldrxq[ofldqidx].rspq);
753                 if (err)
754                         goto unwind;
755                 msi_index++;
756         }
757         for_each_rdmarxq(s, rdmaqidx) {
758                 err = request_irq(adap->msix_info[msi_index].vec,
759                                   t4_sge_intr_msix, 0,
760                                   adap->msix_info[msi_index].desc,
761                                   &s->rdmarxq[rdmaqidx].rspq);
762                 if (err)
763                         goto unwind;
764                 msi_index++;
765         }
766         return 0;
767
768 unwind:
769         while (--rdmaqidx >= 0)
770                 free_irq(adap->msix_info[--msi_index].vec,
771                          &s->rdmarxq[rdmaqidx].rspq);
772         while (--ofldqidx >= 0)
773                 free_irq(adap->msix_info[--msi_index].vec,
774                          &s->ofldrxq[ofldqidx].rspq);
775         while (--ethqidx >= 0)
776                 free_irq(adap->msix_info[--msi_index].vec,
777                          &s->ethrxq[ethqidx].rspq);
778         free_irq(adap->msix_info[1].vec, &s->fw_evtq);
779         return err;
780 }
781
782 static void free_msix_queue_irqs(struct adapter *adap)
783 {
784         int i, msi_index = 2;
785         struct sge *s = &adap->sge;
786
787         free_irq(adap->msix_info[1].vec, &s->fw_evtq);
788         for_each_ethrxq(s, i)
789                 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
790         for_each_ofldrxq(s, i)
791                 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
792         for_each_rdmarxq(s, i)
793                 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
794 }
795
796 /**
797  *      write_rss - write the RSS table for a given port
798  *      @pi: the port
799  *      @queues: array of queue indices for RSS
800  *
801  *      Sets up the portion of the HW RSS table for the port's VI to distribute
802  *      packets to the Rx queues in @queues.
803  */
804 static int write_rss(const struct port_info *pi, const u16 *queues)
805 {
806         u16 *rss;
807         int i, err;
808         const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
809
810         rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
811         if (!rss)
812                 return -ENOMEM;
813
814         /* map the queue indices to queue ids */
815         for (i = 0; i < pi->rss_size; i++, queues++)
816                 rss[i] = q[*queues].rspq.abs_id;
817
818         err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
819                                   pi->rss_size, rss, pi->rss_size);
820         kfree(rss);
821         return err;
822 }
823
824 /**
825  *      setup_rss - configure RSS
826  *      @adap: the adapter
827  *
828  *      Sets up RSS for each port.
829  */
830 static int setup_rss(struct adapter *adap)
831 {
832         int i, err;
833
834         for_each_port(adap, i) {
835                 const struct port_info *pi = adap2pinfo(adap, i);
836
837                 err = write_rss(pi, pi->rss);
838                 if (err)
839                         return err;
840         }
841         return 0;
842 }
843
844 /*
845  * Return the channel of the ingress queue with the given qid.
846  */
847 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
848 {
849         qid -= p->ingr_start;
850         return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
851 }
852
853 /*
854  * Wait until all NAPI handlers are descheduled.
855  */
856 static void quiesce_rx(struct adapter *adap)
857 {
858         int i;
859
860         for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
861                 struct sge_rspq *q = adap->sge.ingr_map[i];
862
863                 if (q && q->handler)
864                         napi_disable(&q->napi);
865         }
866 }
867
868 /*
869  * Enable NAPI scheduling and interrupt generation for all Rx queues.
870  */
871 static void enable_rx(struct adapter *adap)
872 {
873         int i;
874
875         for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
876                 struct sge_rspq *q = adap->sge.ingr_map[i];
877
878                 if (!q)
879                         continue;
880                 if (q->handler)
881                         napi_enable(&q->napi);
882                 /* 0-increment GTS to start the timer and enable interrupts */
883                 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
884                              SEINTARM(q->intr_params) |
885                              INGRESSQID(q->cntxt_id));
886         }
887 }
888
889 /**
890  *      setup_sge_queues - configure SGE Tx/Rx/response queues
891  *      @adap: the adapter
892  *
893  *      Determines how many sets of SGE queues to use and initializes them.
894  *      We support multiple queue sets per port if we have MSI-X, otherwise
895  *      just one queue set per port.
896  */
897 static int setup_sge_queues(struct adapter *adap)
898 {
899         int err, msi_idx, i, j;
900         struct sge *s = &adap->sge;
901
902         bitmap_zero(s->starving_fl, MAX_EGRQ);
903         bitmap_zero(s->txq_maperr, MAX_EGRQ);
904
905         if (adap->flags & USING_MSIX)
906                 msi_idx = 1;         /* vector 0 is for non-queue interrupts */
907         else {
908                 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
909                                        NULL, NULL);
910                 if (err)
911                         return err;
912                 msi_idx = -((int)s->intrq.abs_id + 1);
913         }
914
915         err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
916                                msi_idx, NULL, fwevtq_handler);
917         if (err) {
918 freeout:        t4_free_sge_resources(adap);
919                 return err;
920         }
921
922         for_each_port(adap, i) {
923                 struct net_device *dev = adap->port[i];
924                 struct port_info *pi = netdev_priv(dev);
925                 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
926                 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
927
928                 for (j = 0; j < pi->nqsets; j++, q++) {
929                         if (msi_idx > 0)
930                                 msi_idx++;
931                         err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
932                                                msi_idx, &q->fl,
933                                                t4_ethrx_handler);
934                         if (err)
935                                 goto freeout;
936                         q->rspq.idx = j;
937                         memset(&q->stats, 0, sizeof(q->stats));
938                 }
939                 for (j = 0; j < pi->nqsets; j++, t++) {
940                         err = t4_sge_alloc_eth_txq(adap, t, dev,
941                                         netdev_get_tx_queue(dev, j),
942                                         s->fw_evtq.cntxt_id);
943                         if (err)
944                                 goto freeout;
945                 }
946         }
947
948         j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
949         for_each_ofldrxq(s, i) {
950                 struct sge_ofld_rxq *q = &s->ofldrxq[i];
951                 struct net_device *dev = adap->port[i / j];
952
953                 if (msi_idx > 0)
954                         msi_idx++;
955                 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
956                                        &q->fl, uldrx_handler);
957                 if (err)
958                         goto freeout;
959                 memset(&q->stats, 0, sizeof(q->stats));
960                 s->ofld_rxq[i] = q->rspq.abs_id;
961                 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
962                                             s->fw_evtq.cntxt_id);
963                 if (err)
964                         goto freeout;
965         }
966
967         for_each_rdmarxq(s, i) {
968                 struct sge_ofld_rxq *q = &s->rdmarxq[i];
969
970                 if (msi_idx > 0)
971                         msi_idx++;
972                 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
973                                        msi_idx, &q->fl, uldrx_handler);
974                 if (err)
975                         goto freeout;
976                 memset(&q->stats, 0, sizeof(q->stats));
977                 s->rdma_rxq[i] = q->rspq.abs_id;
978         }
979
980         for_each_port(adap, i) {
981                 /*
982                  * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
983                  * have RDMA queues, and that's the right value.
984                  */
985                 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
986                                             s->fw_evtq.cntxt_id,
987                                             s->rdmarxq[i].rspq.cntxt_id);
988                 if (err)
989                         goto freeout;
990         }
991
992         t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
993                      RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
994                      QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
995         return 0;
996 }
997
998 /*
999  * Returns 0 if new FW was successfully loaded, a positive errno if a load was
1000  * started but failed, and a negative errno if flash load couldn't start.
1001  */
1002 static int upgrade_fw(struct adapter *adap)
1003 {
1004         int ret;
1005         u32 vers;
1006         const struct fw_hdr *hdr;
1007         const struct firmware *fw;
1008         struct device *dev = adap->pdev_dev;
1009
1010         ret = request_firmware(&fw, FW_FNAME, dev);
1011         if (ret < 0) {
1012                 dev_err(dev, "unable to load firmware image " FW_FNAME
1013                         ", error %d\n", ret);
1014                 return ret;
1015         }
1016
1017         hdr = (const struct fw_hdr *)fw->data;
1018         vers = ntohl(hdr->fw_ver);
1019         if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
1020                 ret = -EINVAL;              /* wrong major version, won't do */
1021                 goto out;
1022         }
1023
1024         /*
1025          * If the flash FW is unusable or we found something newer, load it.
1026          */
1027         if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
1028             vers > adap->params.fw_vers) {
1029                 dev_info(dev, "upgrading firmware ...\n");
1030                 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
1031                                     /*force=*/false);
1032                 if (!ret)
1033                         dev_info(dev, "firmware successfully upgraded to "
1034                                  FW_FNAME " (%d.%d.%d.%d)\n",
1035                                  FW_HDR_FW_VER_MAJOR_GET(vers),
1036                                  FW_HDR_FW_VER_MINOR_GET(vers),
1037                                  FW_HDR_FW_VER_MICRO_GET(vers),
1038                                  FW_HDR_FW_VER_BUILD_GET(vers));
1039                 else
1040                         dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
1041         } else {
1042                 /*
1043                  * Tell our caller that we didn't upgrade the firmware.
1044                  */
1045                 ret = -EINVAL;
1046         }
1047
1048 out:    release_firmware(fw);
1049         return ret;
1050 }
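/*
 * Hedged usage sketch (the helper name is ours, not from the original
 * source) of the return contract documented above: only a return of 0
 * means new firmware was written to flash; any non-zero return leaves the
 * adapter running the firmware image it already had.
 */
static inline void upgrade_fw_example(struct adapter *adap)
{
	if (upgrade_fw(adap) == 0)
		dev_info(adap->pdev_dev, "flash updated with new firmware\n");
	/* on any failure the previously flashed image remains in use */
}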
1051
1052 /*
1053  * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1054  * The allocated memory is cleared.
1055  */
1056 void *t4_alloc_mem(size_t size)
1057 {
1058         void *p = kzalloc(size, GFP_KERNEL);
1059
1060         if (!p)
1061                 p = vzalloc(size);
1062         return p;
1063 }
1064
1065 /*
1066  * Free memory allocated through alloc_mem().
1067  */
1068 static void t4_free_mem(void *addr)
1069 {
1070         if (is_vmalloc_addr(addr))
1071                 vfree(addr);
1072         else
1073                 kfree(addr);
1074 }
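/*
 * Minimal usage sketch (ours, illustrative): t4_alloc_mem() may hand back
 * either kmalloc'ed or vmalloc'ed zeroed memory, so the buffer must be
 * released with t4_free_mem(), which selects kfree() or vfree() via
 * is_vmalloc_addr().
 */
static inline int t4_alloc_mem_example(void)
{
	u16 *tbl = t4_alloc_mem(4096 * sizeof(u16));	/* zeroed on return */

	if (!tbl)
		return -ENOMEM;
	/* ... use tbl ... */
	t4_free_mem(tbl);
	return 0;
}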
1075
1076 /* Send a Work Request to write the filter at a specified index.  We construct
1077  * a Firmware Filter Work Request to have the work done and put the indicated
1078  * filter into "pending" mode which will prevent any further actions against
1079  * it till we get a reply from the firmware on the completion status of the
1080  * request.
1081  */
1082 static int set_filter_wr(struct adapter *adapter, int fidx)
1083 {
1084         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1085         struct sk_buff *skb;
1086         struct fw_filter_wr *fwr;
1087         unsigned int ftid;
1088
1089         /* If the new filter requires loopback Destination MAC and/or VLAN
1090          * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1091          * the filter.
1092          */
1093         if (f->fs.newdmac || f->fs.newvlan) {
1094                 /* allocate L2T entry for new filter */
1095                 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1096                 if (f->l2t == NULL)
1097                         return -EAGAIN;
1098                 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1099                                         f->fs.eport, f->fs.dmac)) {
1100                         cxgb4_l2t_release(f->l2t);
1101                         f->l2t = NULL;
1102                         return -ENOMEM;
1103                 }
1104         }
1105
1106         ftid = adapter->tids.ftid_base + fidx;
1107
1108         skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1109         fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1110         memset(fwr, 0, sizeof(*fwr));
1111
1112         /* It would be nice to put most of the following in t4_hw.c but most
1113          * of the work is translating the cxgbtool ch_filter_specification
1114          * into the Work Request and the definition of that structure is
1115          * currently in cxgbtool.h which isn't appropriate to pull into the
1116          * common code.  We may eventually try to come up with a more neutral
1117          * filter specification structure but for now it's easiest to simply
1118          * put this fairly direct code in line ...
1119          */
1120         fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1121         fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1122         fwr->tid_to_iq =
1123                 htonl(V_FW_FILTER_WR_TID(ftid) |
1124                       V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1125                       V_FW_FILTER_WR_NOREPLY(0) |
1126                       V_FW_FILTER_WR_IQ(f->fs.iq));
1127         fwr->del_filter_to_l2tix =
1128                 htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1129                       V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1130                       V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1131                       V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1132                       V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1133                       V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1134                       V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1135                       V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1136                       V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1137                                              f->fs.newvlan == VLAN_REWRITE) |
1138                       V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1139                                             f->fs.newvlan == VLAN_REWRITE) |
1140                       V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1141                       V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1142                       V_FW_FILTER_WR_PRIO(f->fs.prio) |
1143                       V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1144         fwr->ethtype = htons(f->fs.val.ethtype);
1145         fwr->ethtypem = htons(f->fs.mask.ethtype);
1146         fwr->frag_to_ovlan_vldm =
1147                 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1148                  V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1149                  V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1150                  V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1151                  V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1152                  V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1153         fwr->smac_sel = 0;
1154         fwr->rx_chan_rx_rpl_iq =
1155                 htons(V_FW_FILTER_WR_RX_CHAN(0) |
1156                       V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1157         fwr->maci_to_matchtypem =
1158                 htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1159                       V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1160                       V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1161                       V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1162                       V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1163                       V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1164                       V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1165                       V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1166         fwr->ptcl = f->fs.val.proto;
1167         fwr->ptclm = f->fs.mask.proto;
1168         fwr->ttyp = f->fs.val.tos;
1169         fwr->ttypm = f->fs.mask.tos;
1170         fwr->ivlan = htons(f->fs.val.ivlan);
1171         fwr->ivlanm = htons(f->fs.mask.ivlan);
1172         fwr->ovlan = htons(f->fs.val.ovlan);
1173         fwr->ovlanm = htons(f->fs.mask.ovlan);
1174         memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1175         memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1176         memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1177         memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1178         fwr->lp = htons(f->fs.val.lport);
1179         fwr->lpm = htons(f->fs.mask.lport);
1180         fwr->fp = htons(f->fs.val.fport);
1181         fwr->fpm = htons(f->fs.mask.fport);
1182         if (f->fs.newsmac)
1183                 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1184
1185         /* Mark the filter as "pending" and ship off the Filter Work Request.
1186          * When we get the Work Request Reply we'll clear the pending status.
1187          */
1188         f->pending = 1;
1189         set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1190         t4_ofld_send(adapter, skb);
1191         return 0;
1192 }
1193
1194 /* Delete the filter at a specified index.
1195  */
1196 static int del_filter_wr(struct adapter *adapter, int fidx)
1197 {
1198         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1199         struct sk_buff *skb;
1200         struct fw_filter_wr *fwr;
1201         unsigned int len, ftid;
1202
1203         len = sizeof(*fwr);
1204         ftid = adapter->tids.ftid_base + fidx;
1205
1206         skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1207         fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1208         t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1209
1210         /* Mark the filter as "pending" and ship off the Filter Work Request.
1211          * When we get the Work Request Reply we'll clear the pending status.
1212          */
1213         f->pending = 1;
1214         t4_mgmt_tx(adapter, skb);
1215         return 0;
1216 }
1217
1218 static inline int is_offload(const struct adapter *adap)
1219 {
1220         return adap->params.offload;
1221 }
1222
1223 /*
1224  * Implementation of ethtool operations.
1225  */
1226
1227 static u32 get_msglevel(struct net_device *dev)
1228 {
1229         return netdev2adap(dev)->msg_enable;
1230 }
1231
1232 static void set_msglevel(struct net_device *dev, u32 val)
1233 {
1234         netdev2adap(dev)->msg_enable = val;
1235 }
1236
1237 static char stats_strings[][ETH_GSTRING_LEN] = {
1238         "TxOctetsOK         ",
1239         "TxFramesOK         ",
1240         "TxBroadcastFrames  ",
1241         "TxMulticastFrames  ",
1242         "TxUnicastFrames    ",
1243         "TxErrorFrames      ",
1244
1245         "TxFrames64         ",
1246         "TxFrames65To127    ",
1247         "TxFrames128To255   ",
1248         "TxFrames256To511   ",
1249         "TxFrames512To1023  ",
1250         "TxFrames1024To1518 ",
1251         "TxFrames1519ToMax  ",
1252
1253         "TxFramesDropped    ",
1254         "TxPauseFrames      ",
1255         "TxPPP0Frames       ",
1256         "TxPPP1Frames       ",
1257         "TxPPP2Frames       ",
1258         "TxPPP3Frames       ",
1259         "TxPPP4Frames       ",
1260         "TxPPP5Frames       ",
1261         "TxPPP6Frames       ",
1262         "TxPPP7Frames       ",
1263
1264         "RxOctetsOK         ",
1265         "RxFramesOK         ",
1266         "RxBroadcastFrames  ",
1267         "RxMulticastFrames  ",
1268         "RxUnicastFrames    ",
1269
1270         "RxFramesTooLong    ",
1271         "RxJabberErrors     ",
1272         "RxFCSErrors        ",
1273         "RxLengthErrors     ",
1274         "RxSymbolErrors     ",
1275         "RxRuntFrames       ",
1276
1277         "RxFrames64         ",
1278         "RxFrames65To127    ",
1279         "RxFrames128To255   ",
1280         "RxFrames256To511   ",
1281         "RxFrames512To1023  ",
1282         "RxFrames1024To1518 ",
1283         "RxFrames1519ToMax  ",
1284
1285         "RxPauseFrames      ",
1286         "RxPPP0Frames       ",
1287         "RxPPP1Frames       ",
1288         "RxPPP2Frames       ",
1289         "RxPPP3Frames       ",
1290         "RxPPP4Frames       ",
1291         "RxPPP5Frames       ",
1292         "RxPPP6Frames       ",
1293         "RxPPP7Frames       ",
1294
1295         "RxBG0FramesDropped ",
1296         "RxBG1FramesDropped ",
1297         "RxBG2FramesDropped ",
1298         "RxBG3FramesDropped ",
1299         "RxBG0FramesTrunc   ",
1300         "RxBG1FramesTrunc   ",
1301         "RxBG2FramesTrunc   ",
1302         "RxBG3FramesTrunc   ",
1303
1304         "TSO                ",
1305         "TxCsumOffload      ",
1306         "RxCsumGood         ",
1307         "VLANextractions    ",
1308         "VLANinsertions     ",
1309         "GROpackets         ",
1310         "GROmerged          ",
1311 };
1312
1313 static int get_sset_count(struct net_device *dev, int sset)
1314 {
1315         switch (sset) {
1316         case ETH_SS_STATS:
1317                 return ARRAY_SIZE(stats_strings);
1318         default:
1319                 return -EOPNOTSUPP;
1320         }
1321 }
1322
1323 #define T4_REGMAP_SIZE (160 * 1024)
1324
1325 static int get_regs_len(struct net_device *dev)
1326 {
1327         return T4_REGMAP_SIZE;
1328 }
1329
1330 static int get_eeprom_len(struct net_device *dev)
1331 {
1332         return EEPROMSIZE;
1333 }
1334
1335 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1336 {
1337         struct adapter *adapter = netdev2adap(dev);
1338
1339         strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1340         strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1341         strlcpy(info->bus_info, pci_name(adapter->pdev),
1342                 sizeof(info->bus_info));
1343
1344         if (adapter->params.fw_vers)
1345                 snprintf(info->fw_version, sizeof(info->fw_version),
1346                         "%u.%u.%u.%u, TP %u.%u.%u.%u",
1347                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1348                         FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1349                         FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1350                         FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1351                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1352                         FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1353                         FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1354                         FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1355 }
1356
1357 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1358 {
1359         if (stringset == ETH_SS_STATS)
1360                 memcpy(data, stats_strings, sizeof(stats_strings));
1361 }
1362
1363 /*
1364  * port stats maintained per queue of the port.  They should be in the same
1365  * order as in stats_strings above.
1366  */
1367 struct queue_port_stats {
1368         u64 tso;
1369         u64 tx_csum;
1370         u64 rx_csum;
1371         u64 vlan_ex;
1372         u64 vlan_ins;
1373         u64 gro_pkts;
1374         u64 gro_merged;
1375 };
1376
1377 static void collect_sge_port_stats(const struct adapter *adap,
1378                 const struct port_info *p, struct queue_port_stats *s)
1379 {
1380         int i;
1381         const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1382         const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1383
1384         memset(s, 0, sizeof(*s));
1385         for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1386                 s->tso += tx->tso;
1387                 s->tx_csum += tx->tx_cso;
1388                 s->rx_csum += rx->stats.rx_cso;
1389                 s->vlan_ex += rx->stats.vlan_ex;
1390                 s->vlan_ins += tx->vlan_ins;
1391                 s->gro_pkts += rx->stats.lro_pkts;
1392                 s->gro_merged += rx->stats.lro_merged;
1393         }
1394 }
1395
1396 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1397                       u64 *data)
1398 {
1399         struct port_info *pi = netdev_priv(dev);
1400         struct adapter *adapter = pi->adapter;
1401
1402         t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1403
1404         data += sizeof(struct port_stats) / sizeof(u64);
1405         collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1406 }
1407
1408 /*
1409  * Return a version number to identify the type of adapter.  The scheme is:
1410  * - bits 0..9: chip version
1411  * - bits 10..15: chip revision
1412  * - bits 16..23: register dump version
1413  */
1414 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1415 {
1416         return 4 | (ap->params.rev << 10) | (1 << 16);
1417 }
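/*
 * Worked example (illustrative): for a chip revision of 2 this yields
 * 4 | (2 << 10) | (1 << 16) = 0x10804, i.e. chip version 4 (T4),
 * revision 2, register dump version 1.
 */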
1418
1419 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1420                            unsigned int end)
1421 {
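        /*
         * buf + start relies on void-pointer arithmetic (a GNU C extension
         * the kernel builds with): each register's value is stored at its
         * own address offset within the dump buffer, so the dump preserves
         * the chip's register layout.
         */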
1422         u32 *p = buf + start;
1423
1424         for ( ; start <= end; start += sizeof(u32))
1425                 *p++ = t4_read_reg(ap, start);
1426 }
1427
1428 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1429                      void *buf)
1430 {
1431         static const unsigned int reg_ranges[] = {
1432                 0x1008, 0x1108,
1433                 0x1180, 0x11b4,
1434                 0x11fc, 0x123c,
1435                 0x1300, 0x173c,
1436                 0x1800, 0x18fc,
1437                 0x3000, 0x30d8,
1438                 0x30e0, 0x5924,
1439                 0x5960, 0x59d4,
1440                 0x5a00, 0x5af8,
1441                 0x6000, 0x6098,
1442                 0x6100, 0x6150,
1443                 0x6200, 0x6208,
1444                 0x6240, 0x6248,
1445                 0x6280, 0x6338,
1446                 0x6370, 0x638c,
1447                 0x6400, 0x643c,
1448                 0x6500, 0x6524,
1449                 0x6a00, 0x6a38,
1450                 0x6a60, 0x6a78,
1451                 0x6b00, 0x6b84,
1452                 0x6bf0, 0x6c84,
1453                 0x6cf0, 0x6d84,
1454                 0x6df0, 0x6e84,
1455                 0x6ef0, 0x6f84,
1456                 0x6ff0, 0x7084,
1457                 0x70f0, 0x7184,
1458                 0x71f0, 0x7284,
1459                 0x72f0, 0x7384,
1460                 0x73f0, 0x7450,
1461                 0x7500, 0x7530,
1462                 0x7600, 0x761c,
1463                 0x7680, 0x76cc,
1464                 0x7700, 0x7798,
1465                 0x77c0, 0x77fc,
1466                 0x7900, 0x79fc,
1467                 0x7b00, 0x7c38,
1468                 0x7d00, 0x7efc,
1469                 0x8dc0, 0x8e1c,
1470                 0x8e30, 0x8e78,
1471                 0x8ea0, 0x8f6c,
1472                 0x8fc0, 0x9074,
1473                 0x90fc, 0x90fc,
1474                 0x9400, 0x9458,
1475                 0x9600, 0x96bc,
1476                 0x9800, 0x9808,
1477                 0x9820, 0x983c,
1478                 0x9850, 0x9864,
1479                 0x9c00, 0x9c6c,
1480                 0x9c80, 0x9cec,
1481                 0x9d00, 0x9d6c,
1482                 0x9d80, 0x9dec,
1483                 0x9e00, 0x9e6c,
1484                 0x9e80, 0x9eec,
1485                 0x9f00, 0x9f6c,
1486                 0x9f80, 0x9fec,
1487                 0xd004, 0xd03c,
1488                 0xdfc0, 0xdfe0,
1489                 0xe000, 0xea7c,
1490                 0xf000, 0x11190,
1491                 0x19040, 0x1906c,
1492                 0x19078, 0x19080,
1493                 0x1908c, 0x19124,
1494                 0x19150, 0x191b0,
1495                 0x191d0, 0x191e8,
1496                 0x19238, 0x1924c,
1497                 0x193f8, 0x19474,
1498                 0x19490, 0x194f8,
1499                 0x19800, 0x19f30,
1500                 0x1a000, 0x1a06c,
1501                 0x1a0b0, 0x1a120,
1502                 0x1a128, 0x1a138,
1503                 0x1a190, 0x1a1c4,
1504                 0x1a1fc, 0x1a1fc,
1505                 0x1e040, 0x1e04c,
1506                 0x1e284, 0x1e28c,
1507                 0x1e2c0, 0x1e2c0,
1508                 0x1e2e0, 0x1e2e0,
1509                 0x1e300, 0x1e384,
1510                 0x1e3c0, 0x1e3c8,
1511                 0x1e440, 0x1e44c,
1512                 0x1e684, 0x1e68c,
1513                 0x1e6c0, 0x1e6c0,
1514                 0x1e6e0, 0x1e6e0,
1515                 0x1e700, 0x1e784,
1516                 0x1e7c0, 0x1e7c8,
1517                 0x1e840, 0x1e84c,
1518                 0x1ea84, 0x1ea8c,
1519                 0x1eac0, 0x1eac0,
1520                 0x1eae0, 0x1eae0,
1521                 0x1eb00, 0x1eb84,
1522                 0x1ebc0, 0x1ebc8,
1523                 0x1ec40, 0x1ec4c,
1524                 0x1ee84, 0x1ee8c,
1525                 0x1eec0, 0x1eec0,
1526                 0x1eee0, 0x1eee0,
1527                 0x1ef00, 0x1ef84,
1528                 0x1efc0, 0x1efc8,
1529                 0x1f040, 0x1f04c,
1530                 0x1f284, 0x1f28c,
1531                 0x1f2c0, 0x1f2c0,
1532                 0x1f2e0, 0x1f2e0,
1533                 0x1f300, 0x1f384,
1534                 0x1f3c0, 0x1f3c8,
1535                 0x1f440, 0x1f44c,
1536                 0x1f684, 0x1f68c,
1537                 0x1f6c0, 0x1f6c0,
1538                 0x1f6e0, 0x1f6e0,
1539                 0x1f700, 0x1f784,
1540                 0x1f7c0, 0x1f7c8,
1541                 0x1f840, 0x1f84c,
1542                 0x1fa84, 0x1fa8c,
1543                 0x1fac0, 0x1fac0,
1544                 0x1fae0, 0x1fae0,
1545                 0x1fb00, 0x1fb84,
1546                 0x1fbc0, 0x1fbc8,
1547                 0x1fc40, 0x1fc4c,
1548                 0x1fe84, 0x1fe8c,
1549                 0x1fec0, 0x1fec0,
1550                 0x1fee0, 0x1fee0,
1551                 0x1ff00, 0x1ff84,
1552                 0x1ffc0, 0x1ffc8,
1553                 0x20000, 0x2002c,
1554                 0x20100, 0x2013c,
1555                 0x20190, 0x201c8,
1556                 0x20200, 0x20318,
1557                 0x20400, 0x20528,
1558                 0x20540, 0x20614,
1559                 0x21000, 0x21040,
1560                 0x2104c, 0x21060,
1561                 0x210c0, 0x210ec,
1562                 0x21200, 0x21268,
1563                 0x21270, 0x21284,
1564                 0x212fc, 0x21388,
1565                 0x21400, 0x21404,
1566                 0x21500, 0x21518,
1567                 0x2152c, 0x2153c,
1568                 0x21550, 0x21554,
1569                 0x21600, 0x21600,
1570                 0x21608, 0x21628,
1571                 0x21630, 0x2163c,
1572                 0x21700, 0x2171c,
1573                 0x21780, 0x2178c,
1574                 0x21800, 0x21c38,
1575                 0x21c80, 0x21d7c,
1576                 0x21e00, 0x21e04,
1577                 0x22000, 0x2202c,
1578                 0x22100, 0x2213c,
1579                 0x22190, 0x221c8,
1580                 0x22200, 0x22318,
1581                 0x22400, 0x22528,
1582                 0x22540, 0x22614,
1583                 0x23000, 0x23040,
1584                 0x2304c, 0x23060,
1585                 0x230c0, 0x230ec,
1586                 0x23200, 0x23268,
1587                 0x23270, 0x23284,
1588                 0x232fc, 0x23388,
1589                 0x23400, 0x23404,
1590                 0x23500, 0x23518,
1591                 0x2352c, 0x2353c,
1592                 0x23550, 0x23554,
1593                 0x23600, 0x23600,
1594                 0x23608, 0x23628,
1595                 0x23630, 0x2363c,
1596                 0x23700, 0x2371c,
1597                 0x23780, 0x2378c,
1598                 0x23800, 0x23c38,
1599                 0x23c80, 0x23d7c,
1600                 0x23e00, 0x23e04,
1601                 0x24000, 0x2402c,
1602                 0x24100, 0x2413c,
1603                 0x24190, 0x241c8,
1604                 0x24200, 0x24318,
1605                 0x24400, 0x24528,
1606                 0x24540, 0x24614,
1607                 0x25000, 0x25040,
1608                 0x2504c, 0x25060,
1609                 0x250c0, 0x250ec,
1610                 0x25200, 0x25268,
1611                 0x25270, 0x25284,
1612                 0x252fc, 0x25388,
1613                 0x25400, 0x25404,
1614                 0x25500, 0x25518,
1615                 0x2552c, 0x2553c,
1616                 0x25550, 0x25554,
1617                 0x25600, 0x25600,
1618                 0x25608, 0x25628,
1619                 0x25630, 0x2563c,
1620                 0x25700, 0x2571c,
1621                 0x25780, 0x2578c,
1622                 0x25800, 0x25c38,
1623                 0x25c80, 0x25d7c,
1624                 0x25e00, 0x25e04,
1625                 0x26000, 0x2602c,
1626                 0x26100, 0x2613c,
1627                 0x26190, 0x261c8,
1628                 0x26200, 0x26318,
1629                 0x26400, 0x26528,
1630                 0x26540, 0x26614,
1631                 0x27000, 0x27040,
1632                 0x2704c, 0x27060,
1633                 0x270c0, 0x270ec,
1634                 0x27200, 0x27268,
1635                 0x27270, 0x27284,
1636                 0x272fc, 0x27388,
1637                 0x27400, 0x27404,
1638                 0x27500, 0x27518,
1639                 0x2752c, 0x2753c,
1640                 0x27550, 0x27554,
1641                 0x27600, 0x27600,
1642                 0x27608, 0x27628,
1643                 0x27630, 0x2763c,
1644                 0x27700, 0x2771c,
1645                 0x27780, 0x2778c,
1646                 0x27800, 0x27c38,
1647                 0x27c80, 0x27d7c,
1648                 0x27e00, 0x27e04
1649         };
1650
1651         int i;
1652         struct adapter *ap = netdev2adap(dev);
1653
1654         regs->version = mk_adap_vers(ap);
1655
1656         memset(buf, 0, T4_REGMAP_SIZE);
1657         for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
1658                 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1659 }
1660
1661 static int restart_autoneg(struct net_device *dev)
1662 {
1663         struct port_info *p = netdev_priv(dev);
1664
1665         if (!netif_running(dev))
1666                 return -EAGAIN;
1667         if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1668                 return -EINVAL;
1669         t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
1670         return 0;
1671 }
1672
1673 static int identify_port(struct net_device *dev,
1674                          enum ethtool_phys_id_state state)
1675 {
1676         unsigned int val;
1677         struct adapter *adap = netdev2adap(dev);
1678
1679         if (state == ETHTOOL_ID_ACTIVE)
1680                 val = 0xffff;
1681         else if (state == ETHTOOL_ID_INACTIVE)
1682                 val = 0;
1683         else
1684                 return -EINVAL;
1685
1686         return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
1687 }
1688
1689 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1690 {
1691         unsigned int v = 0;
1692
1693         if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1694             type == FW_PORT_TYPE_BT_XAUI) {
1695                 v |= SUPPORTED_TP;
1696                 if (caps & FW_PORT_CAP_SPEED_100M)
1697                         v |= SUPPORTED_100baseT_Full;
1698                 if (caps & FW_PORT_CAP_SPEED_1G)
1699                         v |= SUPPORTED_1000baseT_Full;
1700                 if (caps & FW_PORT_CAP_SPEED_10G)
1701                         v |= SUPPORTED_10000baseT_Full;
1702         } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1703                 v |= SUPPORTED_Backplane;
1704                 if (caps & FW_PORT_CAP_SPEED_1G)
1705                         v |= SUPPORTED_1000baseKX_Full;
1706                 if (caps & FW_PORT_CAP_SPEED_10G)
1707                         v |= SUPPORTED_10000baseKX4_Full;
1708         } else if (type == FW_PORT_TYPE_KR)
1709                 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1710         else if (type == FW_PORT_TYPE_BP_AP)
1711                 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1712                      SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
1713         else if (type == FW_PORT_TYPE_BP4_AP)
1714                 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1715                      SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
1716                      SUPPORTED_10000baseKX4_Full;
1717         else if (type == FW_PORT_TYPE_FIBER_XFI ||
1718                  type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
1719                 v |= SUPPORTED_FIBRE;
1720
1721         if (caps & FW_PORT_CAP_ANEG)
1722                 v |= SUPPORTED_Autoneg;
1723         return v;
1724 }
1725
1726 static unsigned int to_fw_linkcaps(unsigned int caps)
1727 {
1728         unsigned int v = 0;
1729
1730         if (caps & ADVERTISED_100baseT_Full)
1731                 v |= FW_PORT_CAP_SPEED_100M;
1732         if (caps & ADVERTISED_1000baseT_Full)
1733                 v |= FW_PORT_CAP_SPEED_1G;
1734         if (caps & ADVERTISED_10000baseT_Full)
1735                 v |= FW_PORT_CAP_SPEED_10G;
1736         return v;
1737 }
1738
1739 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1740 {
1741         const struct port_info *p = netdev_priv(dev);
1742
1743         if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
1744             p->port_type == FW_PORT_TYPE_BT_XFI ||
1745             p->port_type == FW_PORT_TYPE_BT_XAUI)
1746                 cmd->port = PORT_TP;
1747         else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1748                  p->port_type == FW_PORT_TYPE_FIBER_XAUI)
1749                 cmd->port = PORT_FIBRE;
1750         else if (p->port_type == FW_PORT_TYPE_SFP) {
1751                 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1752                     p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1753                         cmd->port = PORT_DA;
1754                 else
1755                         cmd->port = PORT_FIBRE;
1756         } else
1757                 cmd->port = PORT_OTHER;
1758
1759         if (p->mdio_addr >= 0) {
1760                 cmd->phy_address = p->mdio_addr;
1761                 cmd->transceiver = XCVR_EXTERNAL;
1762                 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1763                         MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1764         } else {
1765                 cmd->phy_address = 0;  /* not really, but no better option */
1766                 cmd->transceiver = XCVR_INTERNAL;
1767                 cmd->mdio_support = 0;
1768         }
1769
1770         cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1771         cmd->advertising = from_fw_linkcaps(p->port_type,
1772                                             p->link_cfg.advertising);
1773         ethtool_cmd_speed_set(cmd,
1774                               netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
1775         cmd->duplex = DUPLEX_FULL;
1776         cmd->autoneg = p->link_cfg.autoneg;
1777         cmd->maxtxpkt = 0;
1778         cmd->maxrxpkt = 0;
1779         return 0;
1780 }
1781
1782 static unsigned int speed_to_caps(int speed)
1783 {
1784         if (speed == SPEED_100)
1785                 return FW_PORT_CAP_SPEED_100M;
1786         if (speed == SPEED_1000)
1787                 return FW_PORT_CAP_SPEED_1G;
1788         if (speed == SPEED_10000)
1789                 return FW_PORT_CAP_SPEED_10G;
1790         return 0;
1791 }
1792
1793 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1794 {
1795         unsigned int cap;
1796         struct port_info *p = netdev_priv(dev);
1797         struct link_config *lc = &p->link_cfg;
1798         u32 speed = ethtool_cmd_speed(cmd);
1799
1800         if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
1801                 return -EINVAL;
1802
1803         if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1804                 /*
1805                  * PHY offers a single speed.  See if that's what's
1806                  * being requested.
1807                  */
1808                 if (cmd->autoneg == AUTONEG_DISABLE &&
1809                     (lc->supported & speed_to_caps(speed)))
1810                         return 0;
1811                 return -EINVAL;
1812         }
1813
1814         if (cmd->autoneg == AUTONEG_DISABLE) {
1815                 cap = speed_to_caps(speed);
1816
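                /*
                 * Only a forced speed of 100M is honoured here; 1G and 10G
                 * are rejected below since they require autonegotiation on
                 * these ports.
                 */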
1817                 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
1818                     (speed == SPEED_10000))
1819                         return -EINVAL;
1820                 lc->requested_speed = cap;
1821                 lc->advertising = 0;
1822         } else {
1823                 cap = to_fw_linkcaps(cmd->advertising);
1824                 if (!(lc->supported & cap))
1825                         return -EINVAL;
1826                 lc->requested_speed = 0;
1827                 lc->advertising = cap | FW_PORT_CAP_ANEG;
1828         }
1829         lc->autoneg = cmd->autoneg;
1830
1831         if (netif_running(dev))
1832                 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1833                                      lc);
1834         return 0;
1835 }
1836
1837 static void get_pauseparam(struct net_device *dev,
1838                            struct ethtool_pauseparam *epause)
1839 {
1840         struct port_info *p = netdev_priv(dev);
1841
1842         epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1843         epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1844         epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1845 }
1846
1847 static int set_pauseparam(struct net_device *dev,
1848                           struct ethtool_pauseparam *epause)
1849 {
1850         struct port_info *p = netdev_priv(dev);
1851         struct link_config *lc = &p->link_cfg;
1852
1853         if (epause->autoneg == AUTONEG_DISABLE)
1854                 lc->requested_fc = 0;
1855         else if (lc->supported & FW_PORT_CAP_ANEG)
1856                 lc->requested_fc = PAUSE_AUTONEG;
1857         else
1858                 return -EINVAL;
1859
1860         if (epause->rx_pause)
1861                 lc->requested_fc |= PAUSE_RX;
1862         if (epause->tx_pause)
1863                 lc->requested_fc |= PAUSE_TX;
1864         if (netif_running(dev))
1865                 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1866                                      lc);
1867         return 0;
1868 }
1869
1870 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1871 {
1872         const struct port_info *pi = netdev_priv(dev);
1873         const struct sge *s = &pi->adapter->sge;
1874
1875         e->rx_max_pending = MAX_RX_BUFFERS;
1876         e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1877         e->rx_jumbo_max_pending = 0;
1878         e->tx_max_pending = MAX_TXQ_ENTRIES;
1879
1880         e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1881         e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1882         e->rx_jumbo_pending = 0;
1883         e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1884 }
1885
1886 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1887 {
1888         int i;
1889         const struct port_info *pi = netdev_priv(dev);
1890         struct adapter *adapter = pi->adapter;
1891         struct sge *s = &adapter->sge;
1892
1893         if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1894             e->tx_pending > MAX_TXQ_ENTRIES ||
1895             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1896             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1897             e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1898                 return -EINVAL;
1899
1900         if (adapter->flags & FULL_INIT_DONE)
1901                 return -EBUSY;
1902
1903         for (i = 0; i < pi->nqsets; ++i) {
1904                 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1905                 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1906                 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1907         }
1908         return 0;
1909 }
1910
1911 static int closest_timer(const struct sge *s, int time)
1912 {
1913         int i, delta, match = 0, min_delta = INT_MAX;
1914
1915         for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1916                 delta = time - s->timer_val[i];
1917                 if (delta < 0)
1918                         delta = -delta;
1919                 if (delta < min_delta) {
1920                         min_delta = delta;
1921                         match = i;
1922                 }
1923         }
1924         return match;
1925 }
1926
1927 static int closest_thres(const struct sge *s, int thres)
1928 {
1929         int i, delta, match = 0, min_delta = INT_MAX;
1930
1931         for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1932                 delta = thres - s->counter_val[i];
1933                 if (delta < 0)
1934                         delta = -delta;
1935                 if (delta < min_delta) {
1936                         min_delta = delta;
1937                         match = i;
1938                 }
1939         }
1940         return match;
1941 }
1942
1943 /*
1944  * Return a queue's interrupt hold-off time in us.  0 means no timer.
1945  */
1946 static unsigned int qtimer_val(const struct adapter *adap,
1947                                const struct sge_rspq *q)
1948 {
1949         unsigned int idx = q->intr_params >> 1;
1950
1951         return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1952 }
1953
1954 /**
1955  *      set_rxq_intr_params - set a queue's interrupt holdoff parameters
1956  *      @adap: the adapter
1957  *      @q: the Rx queue
1958  *      @us: the hold-off time in us, or 0 to disable timer
1959  *      @cnt: the hold-off packet count, or 0 to disable counter
1960  *
1961  *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
1962  *      one of the two needs to be enabled for the queue to generate interrupts.
1963  */
1964 static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1965                                unsigned int us, unsigned int cnt)
1966 {
1967         if ((us | cnt) == 0)
1968                 cnt = 1;
1969
1970         if (cnt) {
1971                 int err;
1972                 u32 v, new_idx;
1973
1974                 new_idx = closest_thres(&adap->sge, cnt);
1975                 if (q->desc && q->pktcnt_idx != new_idx) {
1976                         /* the queue has already been created, update it */
1977                         v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1978                             FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1979                             FW_PARAMS_PARAM_YZ(q->cntxt_id);
1980                         err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
1981                                             &new_idx);
1982                         if (err)
1983                                 return err;
1984                 }
1985                 q->pktcnt_idx = new_idx;
1986         }
1987
1988         us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1989         q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1990         return 0;
1991 }
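/*
 * Illustrative encodings, following the code above: us = 5, cnt = 0 picks
 * the SGE timer whose value is closest to 5 us with the packet counter
 * disabled, while us = 0, cnt = 0 degenerates to cnt = 1, i.e. an
 * interrupt per packet with the timer index parked at the "no timer"
 * value.
 */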
1992
1993 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1994 {
1995         const struct port_info *pi = netdev_priv(dev);
1996         struct adapter *adap = pi->adapter;
1997         struct sge_rspq *q;
1998         int i;
1999         int r = 0;
2000
2001         for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
2002                 q = &adap->sge.ethrxq[i].rspq;
2003                 r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2004                         c->rx_max_coalesced_frames);
2005                 if (r) {
2006                         dev_err(&dev->dev, "failed to set coalescing parameters: %d\n", r);
2007                         break;
2008                 }
2009         }
2010         return r;
2011 }
2012
2013 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2014 {
2015         const struct port_info *pi = netdev_priv(dev);
2016         const struct adapter *adap = pi->adapter;
2017         const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2018
2019         c->rx_coalesce_usecs = qtimer_val(adap, rq);
2020         c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2021                 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2022         return 0;
2023 }
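/*
 * The two callbacks above back "ethtool -c/-C": rx_coalesce_usecs and
 * rx_max_coalesced_frames map onto the hold-off timer and packet counter
 * of each queue's response queue.
 */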
2024
2025 /**
2026  *      eeprom_ptov - translate a physical EEPROM address to virtual
2027  *      @phys_addr: the physical EEPROM address
2028  *      @fn: the PCI function number
2029  *      @sz: size of function-specific area
2030  *
2031  *      Translate a physical EEPROM address to virtual.  The first 1K is
2032  *      accessed through virtual addresses starting at 31K, the rest is
2033  *      accessed through virtual addresses starting at 0.
2034  *
2035  *      The mapping is as follows:
2036  *      [0..1K) -> [31K..32K)
2037  *      [1K..1K+A) -> [31K-A..31K)
2038  *      [1K+A..ES) -> [0..ES-A-1K)
2039  *
2040  *      where A = @fn * @sz, and ES = EEPROM size.
2041  */
2042 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2043 {
2044         fn *= sz;
2045         if (phys_addr < 1024)
2046                 return phys_addr + (31 << 10);
2047         if (phys_addr < 1024 + fn)
2048                 return 31744 - fn + phys_addr - 1024;
2049         if (phys_addr < EEPROMSIZE)
2050                 return phys_addr - 1024 - fn;
2051         return -EINVAL;
2052 }
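/*
 * Worked example of the mapping above, assuming @sz = 1K for illustration
 * and @fn = 2 (so A = 2K): physical 0x100 maps to 31K + 0x100, physical
 * 1K maps to 31K - 2K, and physical 3K + 4 maps to virtual address 4.
 */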
2053
2054 /*
2055  * The next two routines implement EEPROM read/write at physical addresses.
2056  */
2057 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2058 {
2059         int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2060
2061         if (vaddr >= 0)
2062                 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2063         return vaddr < 0 ? vaddr : 0;
2064 }
2065
2066 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2067 {
2068         int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2069
2070         if (vaddr >= 0)
2071                 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2072         return vaddr < 0 ? vaddr : 0;
2073 }
2074
2075 #define EEPROM_MAGIC 0x38E2F10C
2076
2077 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2078                       u8 *data)
2079 {
2080         int i, err = 0;
2081         struct adapter *adapter = netdev2adap(dev);
2082
2083         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2084         if (!buf)
2085                 return -ENOMEM;
2086
2087         e->magic = EEPROM_MAGIC;
2088         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2089                 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2090
2091         if (!err)
2092                 memcpy(data, buf + e->offset, e->len);
2093         kfree(buf);
2094         return err;
2095 }
2096
2097 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2098                       u8 *data)
2099 {
2100         u8 *buf;
2101         int err = 0;
2102         u32 aligned_offset, aligned_len, *p;
2103         struct adapter *adapter = netdev2adap(dev);
2104
2105         if (eeprom->magic != EEPROM_MAGIC)
2106                 return -EINVAL;
2107
2108         aligned_offset = eeprom->offset & ~3;
2109         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2110
2111         if (adapter->fn > 0) {
2112                 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2113
2114                 if (aligned_offset < start ||
2115                     aligned_offset + aligned_len > start + EEPROMPFSIZE)
2116                         return -EPERM;
2117         }
2118
2119         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2120                 /*
2121                  * A read-modify-write may be needed for the first or last words.
2122                  */
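                /*
                 * Example: offset 6, len 4 gives aligned_offset 4 and
                 * aligned_len 8; both boundary words are read back and only
                 * bytes 6..9 within them are overwritten by the new data.
                 */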
2123                 buf = kmalloc(aligned_len, GFP_KERNEL);
2124                 if (!buf)
2125                         return -ENOMEM;
2126                 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2127                 if (!err && aligned_len > 4)
2128                         err = eeprom_rd_phys(adapter,
2129                                              aligned_offset + aligned_len - 4,
2130                                              (u32 *)&buf[aligned_len - 4]);
2131                 if (err)
2132                         goto out;
2133                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2134         } else
2135                 buf = data;
2136
2137         err = t4_seeprom_wp(adapter, false);
2138         if (err)
2139                 goto out;
2140
2141         for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2142                 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2143                 aligned_offset += 4;
2144         }
2145
2146         if (!err)
2147                 err = t4_seeprom_wp(adapter, true);
2148 out:
2149         if (buf != data)
2150                 kfree(buf);
2151         return err;
2152 }
2153
2154 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2155 {
2156         int ret;
2157         const struct firmware *fw;
2158         struct adapter *adap = netdev2adap(netdev);
2159
2160         ef->data[sizeof(ef->data) - 1] = '\0';
2161         ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2162         if (ret < 0)
2163                 return ret;
2164
2165         ret = t4_load_fw(adap, fw->data, fw->size);
2166         release_firmware(fw);
2167         if (!ret)
2168                 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2169         return ret;
2170 }
2171
2172 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2173 #define BCAST_CRC 0xa0ccc1a6
2174
2175 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2176 {
2177         wol->supported = WOL_SUPPORTED;
2178         wol->wolopts = netdev2adap(dev)->wol;
2179         memset(&wol->sopass, 0, sizeof(wol->sopass));
2180 }
2181
2182 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2183 {
2184         int err = 0;
2185         struct port_info *pi = netdev_priv(dev);
2186
2187         if (wol->wolopts & ~WOL_SUPPORTED)
2188                 return -EINVAL;
2189         t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2190                             (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2191         if (wol->wolopts & WAKE_BCAST) {
2192                 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2193                                         ~0ULL, 0, false);
2194                 if (!err)
2195                         err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2196                                                 ~6ULL, ~0ULL, BCAST_CRC, true);
2197         } else
2198                 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2199         return err;
2200 }
2201
2202 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2203 {
2204         const struct port_info *pi = netdev_priv(dev);
2205         netdev_features_t changed = dev->features ^ features;
2206         int err;
2207
2208         if (!(changed & NETIF_F_HW_VLAN_RX))
2209                 return 0;
2210
2211         err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2212                             -1, -1, -1,
2213                             !!(features & NETIF_F_HW_VLAN_RX), true);
2214         if (unlikely(err))
2215                 dev->features = features ^ NETIF_F_HW_VLAN_RX;
2216         return err;
2217 }
2218
2219 static u32 get_rss_table_size(struct net_device *dev)
2220 {
2221         const struct port_info *pi = netdev_priv(dev);
2222
2223         return pi->rss_size;
2224 }
2225
2226 static int get_rss_table(struct net_device *dev, u32 *p)
2227 {
2228         const struct port_info *pi = netdev_priv(dev);
2229         unsigned int n = pi->rss_size;
2230
2231         while (n--)
2232                 p[n] = pi->rss[n];
2233         return 0;
2234 }
2235
2236 static int set_rss_table(struct net_device *dev, const u32 *p)
2237 {
2238         unsigned int i;
2239         struct port_info *pi = netdev_priv(dev);
2240
2241         for (i = 0; i < pi->rss_size; i++)
2242                 pi->rss[i] = p[i];
2243         if (pi->adapter->flags & FULL_INIT_DONE)
2244                 return write_rss(pi, pi->rss);
2245         return 0;
2246 }
2247
2248 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2249                      u32 *rules)
2250 {
2251         const struct port_info *pi = netdev_priv(dev);
2252
2253         switch (info->cmd) {
2254         case ETHTOOL_GRXFH: {
2255                 unsigned int v = pi->rss_mode;
2256
2257                 info->data = 0;
2258                 switch (info->flow_type) {
2259                 case TCP_V4_FLOW:
2260                         if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2261                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2262                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
2263                         else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2264                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2265                         break;
2266                 case UDP_V4_FLOW:
2267                         if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2268                             (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2269                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2270                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
2271                         else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2272                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2273                         break;
2274                 case SCTP_V4_FLOW:
2275                 case AH_ESP_V4_FLOW:
2276                 case IPV4_FLOW:
2277                         if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2278                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2279                         break;
2280                 case TCP_V6_FLOW:
2281                         if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2282                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2283                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
2284                         else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2285                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2286                         break;
2287                 case UDP_V6_FLOW:
2288                         if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2289                             (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2290                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2291                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
2292                         else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2293                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2294                         break;
2295                 case SCTP_V6_FLOW:
2296                 case AH_ESP_V6_FLOW:
2297                 case IPV6_FLOW:
2298                         if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2299                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2300                         break;
2301                 }
2302                 return 0;
2303         }
2304         case ETHTOOL_GRXRINGS:
2305                 info->data = pi->nqsets;
2306                 return 0;
2307         }
2308         return -EOPNOTSUPP;
2309 }
2310
2311 static const struct ethtool_ops cxgb_ethtool_ops = {
2312         .get_settings      = get_settings,
2313         .set_settings      = set_settings,
2314         .get_drvinfo       = get_drvinfo,
2315         .get_msglevel      = get_msglevel,
2316         .set_msglevel      = set_msglevel,
2317         .get_ringparam     = get_sge_param,
2318         .set_ringparam     = set_sge_param,
2319         .get_coalesce      = get_coalesce,
2320         .set_coalesce      = set_coalesce,
2321         .get_eeprom_len    = get_eeprom_len,
2322         .get_eeprom        = get_eeprom,
2323         .set_eeprom        = set_eeprom,
2324         .get_pauseparam    = get_pauseparam,
2325         .set_pauseparam    = set_pauseparam,
2326         .get_link          = ethtool_op_get_link,
2327         .get_strings       = get_strings,
2328         .set_phys_id       = identify_port,
2329         .nway_reset        = restart_autoneg,
2330         .get_sset_count    = get_sset_count,
2331         .get_ethtool_stats = get_stats,
2332         .get_regs_len      = get_regs_len,
2333         .get_regs          = get_regs,
2334         .get_wol           = get_wol,
2335         .set_wol           = set_wol,
2336         .get_rxnfc         = get_rxnfc,
2337         .get_rxfh_indir_size = get_rss_table_size,
2338         .get_rxfh_indir    = get_rss_table,
2339         .set_rxfh_indir    = set_rss_table,
2340         .flash_device      = set_flash,
2341 };
2342
2343 /*
2344  * debugfs support
2345  */
2346 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2347                         loff_t *ppos)
2348 {
2349         loff_t pos = *ppos;
2350         loff_t avail = file->f_path.dentry->d_inode->i_size;
2351         unsigned int mem = (uintptr_t)file->private_data & 3;
2352         struct adapter *adap = file->private_data - mem;
2353
2354         if (pos < 0)
2355                 return -EINVAL;
2356         if (pos >= avail)
2357                 return 0;
2358         if (count > avail - pos)
2359                 count = avail - pos;
2360
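        /*
         * Read in 64-byte (16 dword) chunks; pos need not be aligned, so
         * copy out from the offset of pos within the aligned window the
         * read helpers operate on.
         */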
2361         while (count) {
2362                 size_t len;
2363                 int ret, ofst;
2364                 __be32 data[16];
2365
2366                 if (mem == MEM_MC)
2367                         ret = t4_mc_read(adap, pos, data, NULL);
2368                 else
2369                         ret = t4_edc_read(adap, mem, pos, data, NULL);
2370                 if (ret)
2371                         return ret;
2372
2373                 ofst = pos % sizeof(data);
2374                 len = min(count, sizeof(data) - ofst);
2375                 if (copy_to_user(buf, (u8 *)data + ofst, len))
2376                         return -EFAULT;
2377
2378                 buf += len;
2379                 pos += len;
2380                 count -= len;
2381         }
2382         count = pos - *ppos;
2383         *ppos = pos;
2384         return count;
2385 }
2386
2387 static const struct file_operations mem_debugfs_fops = {
2388         .owner   = THIS_MODULE,
2389         .open    = simple_open,
2390         .read    = mem_read,
2391         .llseek  = default_llseek,
2392 };
2393
2394 static void add_debugfs_mem(struct adapter *adap, const char *name,
2395                             unsigned int idx, unsigned int size_mb)
2396 {
2397         struct dentry *de;
2398
2399         de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2400                                  (void *)adap + idx, &mem_debugfs_fops);
2401         if (de && de->d_inode)
2402                 de->d_inode->i_size = size_mb << 20;
2403 }
2404
2405 static int setup_debugfs(struct adapter *adap)
2406 {
2407         int i;
2408
2409         if (IS_ERR_OR_NULL(adap->debugfs_root))
2410                 return -1;
2411
2412         i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2413         if (i & EDRAM0_ENABLE)
2414                 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2415         if (i & EDRAM1_ENABLE)
2416                 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2417         if (i & EXT_MEM_ENABLE)
2418                 add_debugfs_mem(adap, "mc", MEM_MC,
2419                         EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2420         if (adap->l2t)
2421                 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2422                                     &t4_l2t_fops);
2423         return 0;
2424 }
2425
2426 /*
2427  * upper-layer driver support
2428  */
2429
2430 /*
2431  * Allocate an active-open TID and set it to the supplied value.
2432  */
2433 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2434 {
2435         int atid = -1;
2436
2437         spin_lock_bh(&t->atid_lock);
2438         if (t->afree) {
2439                 union aopen_entry *p = t->afree;
2440
2441                 atid = (p - t->atid_tab) + t->atid_base;
2442                 t->afree = p->next;
2443                 p->data = data;
2444                 t->atids_in_use++;
2445         }
2446         spin_unlock_bh(&t->atid_lock);
2447         return atid;
2448 }
2449 EXPORT_SYMBOL(cxgb4_alloc_atid);
2450
2451 /*
2452  * Release an active-open TID.
2453  */
2454 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2455 {
2456         union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
2457
2458         spin_lock_bh(&t->atid_lock);
2459         p->next = t->afree;
2460         t->afree = p;
2461         t->atids_in_use--;
2462         spin_unlock_bh(&t->atid_lock);
2463 }
2464 EXPORT_SYMBOL(cxgb4_free_atid);
2465
2466 /*
2467  * Allocate a server TID and set it to the supplied value.
2468  */
2469 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2470 {
2471         int stid;
2472
2473         spin_lock_bh(&t->stid_lock);
2474         if (family == PF_INET) {
2475                 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2476                 if (stid < t->nstids)
2477                         __set_bit(stid, t->stid_bmap);
2478                 else
2479                         stid = -1;
2480         } else {
2481                 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2482                 if (stid < 0)
2483                         stid = -1;
2484         }
2485         if (stid >= 0) {
2486                 t->stid_tab[stid].data = data;
2487                 stid += t->stid_base;
2488                 t->stids_in_use++;
2489         }
2490         spin_unlock_bh(&t->stid_lock);
2491         return stid;
2492 }
2493 EXPORT_SYMBOL(cxgb4_alloc_stid);
2494
2495 /* Allocate a server filter TID and set it to the supplied value.
2496  */
2497 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
2498 {
2499         int stid;
2500
2501         spin_lock_bh(&t->stid_lock);
2502         if (family == PF_INET) {
2503                 stid = find_next_zero_bit(t->stid_bmap,
2504                                 t->nstids + t->nsftids, t->nstids);
2505                 if (stid < (t->nstids + t->nsftids))
2506                         __set_bit(stid, t->stid_bmap);
2507                 else
2508                         stid = -1;
2509         } else {
2510                 stid = -1;
2511         }
2512         if (stid >= 0) {
2513                 t->stid_tab[stid].data = data;
2514                 stid += t->stid_base;
2515                 t->stids_in_use++;
2516         }
2517         spin_unlock_bh(&t->stid_lock);
2518         return stid;
2519 }
2520 EXPORT_SYMBOL(cxgb4_alloc_sftid);
2521
2522 /* Release a server TID.
2523  */
2524 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2525 {
2526         stid -= t->stid_base;
2527         spin_lock_bh(&t->stid_lock);
2528         if (family == PF_INET)
2529                 __clear_bit(stid, t->stid_bmap);
2530         else
2531                 bitmap_release_region(t->stid_bmap, stid, 2);
2532         t->stid_tab[stid].data = NULL;
2533         t->stids_in_use--;
2534         spin_unlock_bh(&t->stid_lock);
2535 }
2536 EXPORT_SYMBOL(cxgb4_free_stid);
2537
2538 /*
2539  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
2540  */
2541 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2542                            unsigned int tid)
2543 {
2544         struct cpl_tid_release *req;
2545
2546         set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
2547         req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
2548         INIT_TP_WR(req, tid);
2549         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
2550 }
2551
2552 /*
2553  * Queue a TID release request and if necessary schedule a work queue to
2554  * process it.
2555  */
2556 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2557                                     unsigned int tid)
2558 {
2559         void **p = &t->tid_tab[tid];
2560         struct adapter *adap = container_of(t, struct adapter, tids);
2561
2562         spin_lock_bh(&adap->tid_release_lock);
2563         *p = adap->tid_release_head;
2564         /* Low 2 bits encode the Tx channel number */
2565         adap->tid_release_head = (void **)((uintptr_t)p | chan);
2566         if (!adap->tid_release_task_busy) {
2567                 adap->tid_release_task_busy = true;
2568                 queue_work(workq, &adap->tid_release_task);
2569         }
2570         spin_unlock_bh(&adap->tid_release_lock);
2571 }
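/*
 * Note on the channel encoding above: tid_tab slots are pointer-aligned,
 * so the two low bits of a slot's address are always zero and are free to
 * carry the Tx channel; process_tid_release_list() recovers it by masking
 * with 3.
 */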
2572
2573 /*
2574  * Process the list of pending TID release requests.
2575  */
2576 static void process_tid_release_list(struct work_struct *work)
2577 {
2578         struct sk_buff *skb;
2579         struct adapter *adap;
2580
2581         adap = container_of(work, struct adapter, tid_release_task);
2582
2583         spin_lock_bh(&adap->tid_release_lock);
2584         while (adap->tid_release_head) {
2585                 void **p = adap->tid_release_head;
2586                 unsigned int chan = (uintptr_t)p & 3;
2587                 p = (void *)p - chan;
2588
2589                 adap->tid_release_head = *p;
2590                 *p = NULL;
2591                 spin_unlock_bh(&adap->tid_release_lock);
2592
2593                 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2594                                          GFP_KERNEL)))
2595                         schedule_timeout_uninterruptible(1);
2596
2597                 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2598                 t4_ofld_send(adap, skb);
2599                 spin_lock_bh(&adap->tid_release_lock);
2600         }
2601         adap->tid_release_task_busy = false;
2602         spin_unlock_bh(&adap->tid_release_lock);
2603 }
2604
2605 /*
2606  * Release a TID and inform HW.  If we are unable to allocate the release
2607  * message, we defer it to a work queue.
2608  */
2609 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2610 {
2611         void *old;
2612         struct sk_buff *skb;
2613         struct adapter *adap = container_of(t, struct adapter, tids);
2614
2615         old = t->tid_tab[tid];
2616         skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2617         if (likely(skb)) {
2618                 t->tid_tab[tid] = NULL;
2619                 mk_tid_release(skb, chan, tid);
2620                 t4_ofld_send(adap, skb);
2621         } else
2622                 cxgb4_queue_tid_release(t, chan, tid);
2623         if (old)
2624                 atomic_dec(&t->tids_in_use);
2625 }
2626 EXPORT_SYMBOL(cxgb4_remove_tid);
2627
2628 /*
2629  * Allocate and initialize the TID tables.  Returns 0 on success.
2630  */
2631 static int tid_init(struct tid_info *t)
2632 {
2633         size_t size;
2634         unsigned int stid_bmap_size;
2635         unsigned int natids = t->natids;
2636
2637         stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
2638         size = t->ntids * sizeof(*t->tid_tab) +
2639                natids * sizeof(*t->atid_tab) +
2640                t->nstids * sizeof(*t->stid_tab) +
2641                t->nsftids * sizeof(*t->stid_tab) +
2642                stid_bmap_size * sizeof(long) +
2643                t->nftids * sizeof(*t->ftid_tab) +
2644                t->nsftids * sizeof(*t->ftid_tab);
2645
2646         t->tid_tab = t4_alloc_mem(size);
2647         if (!t->tid_tab)
2648                 return -ENOMEM;
2649
2650         t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2651         t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2652         t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
2653         t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
2654         spin_lock_init(&t->stid_lock);
2655         spin_lock_init(&t->atid_lock);
2656
2657         t->stids_in_use = 0;
2658         t->afree = NULL;
2659         t->atids_in_use = 0;
2660         atomic_set(&t->tids_in_use, 0);
2661
2662         /* Set up the free list for atid_tab and clear the stid bitmap. */
2663         if (natids) {
2664                 while (--natids)
2665                         t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2666                 t->afree = t->atid_tab;
2667         }
2668         bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
2669         return 0;
2670 }
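/*
 * Layout of the single allocation carved up above (left to right):
 *
 *   tid_tab | atid_tab | stid_tab (incl. sftids) | stid_bmap | ftid_tab
 *
 * The pointer setup must match the size computation exactly.
 */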
2671
2672 /**
2673  *      cxgb4_create_server - create an IP server
2674  *      @dev: the device
2675  *      @stid: the server TID
2676  *      @sip: local IP address to bind server to
2677  *      @sport: the server's TCP port
 *      @vlan: the VLAN header information
2678  *      @queue: queue to direct messages from this server to
2679  *
2680  *      Create an IP server for the given port and address.
2681  *      Returns <0 on error and one of the %NET_XMIT_* values on success.
2682  */
2683 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2684                         __be32 sip, __be16 sport, __be16 vlan,
2685                         unsigned int queue)
2686 {
2687         unsigned int chan;
2688         struct sk_buff *skb;
2689         struct adapter *adap;
2690         struct cpl_pass_open_req *req;
2691
2692         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2693         if (!skb)
2694                 return -ENOMEM;
2695
2696         adap = netdev2adap(dev);
2697         req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2698         INIT_TP_WR(req, 0);
2699         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2700         req->local_port = sport;
2701         req->peer_port = htons(0);
2702         req->local_ip = sip;
2703         req->peer_ip = htonl(0);
2704         chan = rxq_to_chan(&adap->sge, queue);
2705         req->opt0 = cpu_to_be64(TX_CHAN(chan));
2706         req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2707                                 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2708         return t4_mgmt_tx(adap, skb);
2709 }
2710 EXPORT_SYMBOL(cxgb4_create_server);
2711
2712 /**
2713  *      cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2714  *      @mtus: the HW MTU table
2715  *      @mtu: the target MTU
2716  *      @idx: index of selected entry in the MTU table
2717  *
2718  *      Returns the index and the value in the HW MTU table that is closest to
2719  *      but does not exceed @mtu, unless @mtu is smaller than any value in the
2720  *      table, in which case that smallest available value is selected.
2721  */
2722 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2723                             unsigned int *idx)
2724 {
2725         unsigned int i = 0;
2726
2727         while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2728                 ++i;
2729         if (idx)
2730                 *idx = i;
2731         return mtus[i];
2732 }
2733 EXPORT_SYMBOL(cxgb4_best_mtu);
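/*
 * Example with an illustrative table: for mtus[] = { 1500, 9000 } and a
 * target MTU of 4000 this returns 1500 with *idx = 0; a target of 1400,
 * smaller than any entry, also returns 1500 as the smallest available
 * value.
 */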
2734
2735 /**
2736  *      cxgb4_port_chan - get the HW channel of a port
2737  *      @dev: the net device for the port
2738  *
2739  *      Return the HW Tx channel of the given port.
2740  */
2741 unsigned int cxgb4_port_chan(const struct net_device *dev)
2742 {
2743         return netdev2pinfo(dev)->tx_chan;
2744 }
2745 EXPORT_SYMBOL(cxgb4_port_chan);
2746
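/**
 *      cxgb4_dbfifo_count - read the doorbell FIFO occupancy
 *      @dev: the net device for the port
 *      @lpfifo: non-zero to report the low-priority FIFO, else high-priority
 *
 *      Return the current occupancy of the selected doorbell FIFO as
 *      reported by the SGE doorbell FIFO status register.
 */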
2747 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2748 {
2749         struct adapter *adap = netdev2adap(dev);
2750         u32 v;
2751
2752         v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2753         return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
2754 }
2755 EXPORT_SYMBOL(cxgb4_dbfifo_count);
2756
2757 /**
2758  *      cxgb4_port_viid - get the VI id of a port
2759  *      @dev: the net device for the port
2760  *
2761  *      Return the VI id of the given port.
2762  */
2763 unsigned int cxgb4_port_viid(const struct net_device *dev)
2764 {
2765         return netdev2pinfo(dev)->viid;
2766 }
2767 EXPORT_SYMBOL(cxgb4_port_viid);
2768
2769 /**
2770  *      cxgb4_port_idx - get the index of a port
2771  *      @dev: the net device for the port
2772  *
2773  *      Return the index of the given port.
2774  */
2775 unsigned int cxgb4_port_idx(const struct net_device *dev)
2776 {
2777         return netdev2pinfo(dev)->port_id;
2778 }
2779 EXPORT_SYMBOL(cxgb4_port_idx);
2780
2781 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2782                          struct tp_tcp_stats *v6)
2783 {
2784         struct adapter *adap = pci_get_drvdata(pdev);
2785
2786         spin_lock(&adap->stats_lock);
2787         t4_tp_get_tcp_stats(adap, v4, v6);
2788         spin_unlock(&adap->stats_lock);
2789 }
2790 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2791
2792 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2793                       const unsigned int *pgsz_order)
2794 {
2795         struct adapter *adap = netdev2adap(dev);
2796
2797         t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2798         t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2799                      HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2800                      HPZ3(pgsz_order[3]));
2801 }
2802 EXPORT_SYMBOL(cxgb4_iscsi_init);
2803
2804 int cxgb4_flush_eq_cache(struct net_device *dev)
2805 {
2806         struct adapter *adap = netdev2adap(dev);
2808 
2809         return t4_fwaddrspace_write(adap, adap->mbox,
2810                                    0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
2812 }
2813 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2814
2815 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2816 {
2817         u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
2818         __be64 indices;
2819         int ret;
2820
2821         ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
2822         if (!ret) {
2823                 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2824                 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
2825         }
2826         return ret;
2827 }
2828
2829 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2830                         u16 size)
2831 {
2832         struct adapter *adap = netdev2adap(dev);
2833         u16 hw_pidx, hw_cidx;
2834         int ret;
2835
2836         ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2837         if (ret)
2838                 goto out;
2839
2840         if (pidx != hw_pidx) {
2841                 u16 delta;
2842
2843                 if (pidx >= hw_pidx)
2844                         delta = pidx - hw_pidx;
2845                 else
2846                         delta = size - hw_pidx + pidx;
2847                 wmb();
2848                 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
2849                              QID(qid) | PIDX(delta));
2850         }
2851 out:
2852         return ret;
2853 }
2854 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
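/*
 * Example of the wrap-around delta above: with size = 1024, hw_pidx = 1000
 * and a target pidx of 8, delta = 1024 - 1000 + 8 = 32 descriptors, which
 * a single doorbell write advances in one step.
 */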
2855
2856 static struct pci_driver cxgb4_driver;
2857
2858 static void check_neigh_update(struct neighbour *neigh)
2859 {
2860         const struct device *parent;
2861         const struct net_device *netdev = neigh->dev;
2862
2863         if (netdev->priv_flags & IFF_802_1Q_VLAN)
2864                 netdev = vlan_dev_real_dev(netdev);
2865         parent = netdev->dev.parent;
2866         if (parent && parent->driver == &cxgb4_driver.driver)
2867                 t4_l2t_update(dev_get_drvdata(parent), neigh);
2868 }
2869
2870 static int netevent_cb(struct notifier_block *nb, unsigned long event,
2871                        void *data)
2872 {
2873         switch (event) {
2874         case NETEVENT_NEIGH_UPDATE:
2875                 check_neigh_update(data);
2876                 break;
2877         case NETEVENT_REDIRECT:
2878         default:
2879                 break;
2880         }
2881         return 0;
2882 }
2883
2884 static bool netevent_registered;
2885 static struct notifier_block cxgb4_netevent_nb = {
2886         .notifier_call = netevent_cb
2887 };
2888
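/*
 * Doorbell drop recovery: the helpers below disable the per-queue kernel
 * doorbells, wait for the hardware doorbell FIFO to drain, resynchronize
 * each Tx queue's pidx with the hardware copy, and re-enable the doorbells.
 */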
static void drain_db_fifo(struct adapter *adap, int usecs)
{
        u32 v;

        do {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(usecs_to_jiffies(usecs));
                v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
                if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
                        break;
        } while (1);
}

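/*
 * While db_disabled is set, a queue's hardware doorbell is left un-rung
 * and only the software producer index advances; sync_txq_pidx() below
 * replays the accumulated updates to the hardware during recovery.
 */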
static void disable_txq_db(struct sge_txq *q)
{
        spin_lock_irq(&q->db_lock);
        q->db_disabled = 1;
        spin_unlock_irq(&q->db_lock);
}

static void enable_txq_db(struct sge_txq *q)
{
        spin_lock_irq(&q->db_lock);
        q->db_disabled = 0;
        spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
        int i;

        for_each_ethrxq(&adap->sge, i)
                disable_txq_db(&adap->sge.ethtxq[i].q);
        for_each_ofldrxq(&adap->sge, i)
                disable_txq_db(&adap->sge.ofldtxq[i].q);
        for_each_port(adap, i)
                disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
        int i;

        for_each_ethrxq(&adap->sge, i)
                enable_txq_db(&adap->sge.ethtxq[i].q);
        for_each_ofldrxq(&adap->sge, i)
                enable_txq_db(&adap->sge.ofldtxq[i].q);
        for_each_port(adap, i)
                enable_txq_db(&adap->sge.ctrlq[i].q);
}

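/*
 * Bring the hardware's notion of a Tx queue's producer index back in sync
 * with the driver's: read the HW pidx, compute how far the software pidx
 * has advanced (modulo the queue size), and ring the doorbell with that
 * delta before re-enabling normal doorbell writes for the queue.
 */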
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
        u16 hw_pidx, hw_cidx;
        int ret;

        spin_lock_bh(&q->db_lock);
        ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
        if (ret)
                goto out;
        if (q->db_pidx != hw_pidx) {
                u16 delta;

                if (q->db_pidx >= hw_pidx)
                        delta = q->db_pidx - hw_pidx;
                else
                        delta = q->size - hw_pidx + q->db_pidx;
                wmb();
                t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
                             QID(q->cntxt_id) | PIDX(delta));
        }
out:
        q->db_disabled = 0;
        spin_unlock_bh(&q->db_lock);
        if (ret)
                CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
        int i;

        for_each_ethrxq(&adap->sge, i)
                sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
        for_each_ofldrxq(&adap->sge, i)
                sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
        for_each_port(adap, i)
                sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
        mutex_lock(&uld_mutex);
        if (adap->uld_handle[CXGB4_ULD_RDMA])
                ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
                                cmd);
        mutex_unlock(&uld_mutex);
}

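/*
 * Work handler for a doorbell FIFO-full condition: tell the RDMA ULD to
 * stop issuing doorbells, wait for the FIFO to drain, re-arm the DBFIFO
 * interrupts that t4_db_full() masked, and then let the ULD resume.
 */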
static void process_db_full(struct work_struct *work)
{
        struct adapter *adap;

        adap = container_of(work, struct adapter, db_full_task);

        notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
        drain_db_fifo(adap, dbfifo_drain_delay);
        t4_set_reg_field(adap, SGE_INT_ENABLE3,
                         DBFIFO_HP_INT | DBFIFO_LP_INT,
                         DBFIFO_HP_INT | DBFIFO_LP_INT);
        notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
}

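/*
 * Work handler for a dropped-doorbell condition: acknowledge the drop,
 * freeze all Tx doorbells, notify the RDMA ULD, wait for the FIFO to
 * drain, resynchronize every egress queue's producer index with the
 * hardware, and finally re-enable doorbells.
 */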
static void process_db_drop(struct work_struct *work)
{
        struct adapter *adap;

        adap = container_of(work, struct adapter, db_drop_task);

        t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
        disable_dbs(adap);
        notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
        drain_db_fifo(adap, 1);
        recover_all_queues(adap);
        enable_dbs(adap);
}

void t4_db_full(struct adapter *adap)
{
        t4_set_reg_field(adap, SGE_INT_ENABLE3,
                         DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
        queue_work(workq, &adap->db_full_task);
}

void t4_db_dropped(struct adapter *adap)
{
        queue_work(workq, &adap->db_drop_task);
}

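/*
 * Build a cxgb4_lld_info describing this adapter's resources (queues, L2T,
 * TID tables, doorbell registers, etc.) and hand it to the given ULD's
 * add() method.  On success, record the returned handle and, if the
 * adapter is already up, tell the ULD so.
 */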
static void uld_attach(struct adapter *adap, unsigned int uld)
{
        void *handle;
        struct cxgb4_lld_info lli;
        unsigned short i;

        lli.pdev = adap->pdev;
        lli.l2t = adap->l2t;
        lli.tids = &adap->tids;
        lli.ports = adap->port;
        lli.vr = &adap->vres;
        lli.mtus = adap->params.mtus;
        if (uld == CXGB4_ULD_RDMA) {
                lli.rxq_ids = adap->sge.rdma_rxq;
                lli.nrxq = adap->sge.rdmaqs;
        } else if (uld == CXGB4_ULD_ISCSI) {
                lli.rxq_ids = adap->sge.ofld_rxq;
                lli.nrxq = adap->sge.ofldqsets;
        }
        lli.ntxq = adap->sge.ofldqsets;
        lli.nchan = adap->params.nports;
        lli.nports = adap->params.nports;
        lli.wr_cred = adap->params.ofldq_wr_cred;
        lli.adapter_type = adap->params.rev;
        lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
        lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
                        t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
                        (adap->fn * 4));
        lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
                        t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
                        (adap->fn * 4));
        lli.filt_mode = adap->filter_mode;
        /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
        for (i = 0; i < NCHAN; i++)
                lli.tx_modq[i] = i;
        lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
        lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
        lli.fw_vers = adap->params.fw_vers;
        lli.dbfifo_int_thresh = dbfifo_int_thresh;
        lli.sge_pktshift = adap->sge.pktshift;
        lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;

        handle = ulds[uld].add(&lli);
        if (IS_ERR(handle)) {
                dev_warn(adap->pdev_dev,
                         "could not attach to the %s driver, error %ld\n",
                         uld_str[uld], PTR_ERR(handle));
                return;
        }

        adap->uld_handle[uld] = handle;

        if (!netevent_registered) {
                register_netevent_notifier(&cxgb4_netevent_nb);
                netevent_registered = true;
        }

        if (adap->flags & FULL_INIT_DONE)
                ulds[uld].state_change(handle, CXGB4_STATE_UP);
}

static void attach_ulds(struct adapter *adap)
{
        unsigned int i;

        mutex_lock(&uld_mutex);
        list_add_tail(&adap->list_node, &adapter_list);
        for (i = 0; i < CXGB4_ULD_MAX; i++)
                if (ulds[i].add)
                        uld_attach(adap, i);
        mutex_unlock(&uld_mutex);
}

static void detach_ulds(struct adapter *adap)
{
        unsigned int i;

        mutex_lock(&uld_mutex);
        list_del(&adap->list_node);
        for (i = 0; i < CXGB4_ULD_MAX; i++)
                if (adap->uld_handle[i]) {
                        ulds[i].state_change(adap->uld_handle[i],
                                             CXGB4_STATE_DETACH);
                        adap->uld_handle[i] = NULL;
                }
        if (netevent_registered && list_empty(&adapter_list)) {
                unregister_netevent_notifier(&cxgb4_netevent_nb);
                netevent_registered = false;
        }
        mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
        unsigned int i;

        mutex_lock(&uld_mutex);
        for (i = 0; i < CXGB4_ULD_MAX; i++)
                if (adap->uld_handle[i])
                        ulds[i].state_change(adap->uld_handle[i], new_state);
        mutex_unlock(&uld_mutex);
}

/**
 *      cxgb4_register_uld - register an upper-layer driver
 *      @type: the ULD type
 *      @p: the ULD methods
 *
 *      Registers an upper-layer driver with this driver and notifies the ULD
 *      about any presently available devices that support its type.  Returns
 *      %-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
        int ret = 0;
        struct adapter *adap;

        if (type >= CXGB4_ULD_MAX)
                return -EINVAL;
        mutex_lock(&uld_mutex);
        if (ulds[type].add) {
                ret = -EBUSY;
                goto out;
        }
        ulds[type] = *p;
        list_for_each_entry(adap, &adapter_list, list_node)
                uld_attach(adap, type);
out:    mutex_unlock(&uld_mutex);
        return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);

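/*
 * A minimal usage sketch (the my_uld_* callback names are hypothetical):
 *
 *      static const struct cxgb4_uld_info my_uld_info = {
 *              .add            = my_uld_add,
 *              .state_change   = my_uld_state_change,
 *              .control        = my_uld_control,
 *      };
 *
 *      cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
 *      ...
 *      cxgb4_unregister_uld(CXGB4_ULD_RDMA);
 */
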
/**
 *      cxgb4_unregister_uld - unregister an upper-layer driver
 *      @type: the ULD type
 *
 *      Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
        struct adapter *adap;

        if (type >= CXGB4_ULD_MAX)
                return -EINVAL;
        mutex_lock(&uld_mutex);
        list_for_each_entry(adap, &adapter_list, list_node)
                adap->uld_handle[type] = NULL;
        ulds[type].add = NULL;
        mutex_unlock(&uld_mutex);
        return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);

/**
 *      cxgb_up - enable the adapter
 *      @adap: adapter being enabled
 *
 *      Called when the first port is enabled, this function performs the
 *      actions necessary to make an adapter operational, such as completing
 *      the initialization of HW modules, and enabling interrupts.
 *
 *      Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
        int err;

        err = setup_sge_queues(adap);
        if (err)
                goto out;
        err = setup_rss(adap);
        if (err)
                goto freeq;

        if (adap->flags & USING_MSIX) {
                name_msix_vecs(adap);
                err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
                                  adap->msix_info[0].desc, adap);
                if (err)
                        goto irq_err;

                err = request_msix_queue_irqs(adap);
                if (err) {
                        free_irq(adap->msix_info[0].vec, adap);
                        goto irq_err;
                }
        } else {
                err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
                                  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
                                  adap->port[0]->name, adap);
                if (err)
                        goto irq_err;
        }
        enable_rx(adap);
        t4_sge_start(adap);
        t4_intr_enable(adap);
        adap->flags |= FULL_INIT_DONE;
        notify_ulds(adap, CXGB4_STATE_UP);
 out:
        return err;
 irq_err:
        dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
        t4_free_sge_resources(adap);
        goto out;
}

static void cxgb_down(struct adapter *adapter)
{
        t4_intr_disable(adapter);
        cancel_work_sync(&adapter->tid_release_task);
        cancel_work_sync(&adapter->db_full_task);
        cancel_work_sync(&adapter->db_drop_task);
        adapter->tid_release_task_busy = false;
        adapter->tid_release_head = NULL;

        if (adapter->flags & USING_MSIX) {
                free_msix_queue_irqs(adapter);
                free_irq(adapter->msix_info[0].vec, adapter);
        } else
                free_irq(adapter->pdev->irq, adapter);
        quiesce_rx(adapter);
        t4_sge_stop(adapter);
        t4_free_sge_resources(adapter);
        adapter->flags &= ~FULL_INIT_DONE;
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
        int err;
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        netif_carrier_off(dev);

        if (!(adapter->flags & FULL_INIT_DONE)) {
                err = cxgb_up(adapter);
                if (err < 0)
                        return err;
        }

        err = link_start(dev);
        if (!err)
                netif_tx_start_all_queues(dev);
        return err;
}

static int cxgb_close(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        netif_tx_stop_all_queues(dev);
        netif_carrier_off(dev);
        return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}

/* Return an error number if the indicated filter isn't writable ...
 */
static int writable_filter(struct filter_entry *f)
{
        if (f->locked)
                return -EPERM;
        if (f->pending)
                return -EBUSY;

        return 0;
}

/* Delete the filter at the specified index (if valid).  This checks for all
 * the common problems with doing this, such as the filter being locked or
 * currently pending in another operation.
 */
static int delete_filter(struct adapter *adapter, unsigned int fidx)
{
        struct filter_entry *f;
        int ret;

        if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
                return -EINVAL;

        f = &adapter->tids.ftid_tab[fidx];
        ret = writable_filter(f);
        if (ret)
                return ret;
        if (f->valid)
                return del_filter_wr(adapter, fidx);

        return 0;
}

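/*
 * Install a "server" filter that steers packets matching the given local
 * IP address and port (and optionally the ingress port) to a specific
 * ingress queue.  The caller passes a server TID, which is translated into
 * the corresponding index in the filter table; server filters live after
 * the normal filters in ftid_tab.
 */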
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
                __be32 sip, __be16 sport, __be16 vlan,
                unsigned int queue, unsigned char port, unsigned char mask)
{
        int ret;
        struct filter_entry *f;
        struct adapter *adap;
        int i;
        u8 *val;

        adap = netdev2adap(dev);

        /* Adjust stid to correct filter index */
        stid -= adap->tids.nstids;
        stid += adap->tids.nftids;

        /* Check to make sure the filter requested is writable ...
         */
        f = &adap->tids.ftid_tab[stid];
        ret = writable_filter(f);
        if (ret)
                return ret;

        /* Clear out any old resources being used by the filter before
         * we start constructing the new filter.
         */
        if (f->valid)
                clear_filter(adap, f);

        /* Clear out filter specifications */
        memset(&f->fs, 0, sizeof(struct ch_filter_specification));
        f->fs.val.lport = cpu_to_be16(sport);
        f->fs.mask.lport  = ~0;
        val = (u8 *)&sip;
        if ((val[0] | val[1] | val[2] | val[3]) != 0) {
                for (i = 0; i < 4; i++) {
                        f->fs.val.lip[i] = val[i];
                        f->fs.mask.lip[i] = ~0;
                }
                if (adap->filter_mode & F_PORT) {
                        f->fs.val.iport = port;
                        f->fs.mask.iport = mask;
                }
        }

        f->fs.dirsteer = 1;
        f->fs.iq = queue;
        /* Mark filter as locked */
        f->locked = 1;
        f->fs.rpttid = 1;

        ret = set_filter_wr(adap, stid);
        if (ret) {
                clear_filter(adap, f);
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);

int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
                unsigned int queue, bool ipv6)
{
        int ret;
        struct filter_entry *f;
        struct adapter *adap;

        adap = netdev2adap(dev);

        /* Adjust stid to correct filter index */
        stid -= adap->tids.nstids;
        stid += adap->tids.nftids;

        f = &adap->tids.ftid_tab[stid];
        /* Unlock the filter */
        f->locked = 0;

        ret = delete_filter(adap, stid);
        if (ret)
                return ret;

        return 0;
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);

static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
                                                struct rtnl_link_stats64 *ns)
{
        struct port_stats stats;
        struct port_info *p = netdev_priv(dev);
        struct adapter *adapter = p->adapter;

        spin_lock(&adapter->stats_lock);
        t4_get_port_stats(adapter, p->tx_chan, &stats);
        spin_unlock(&adapter->stats_lock);

        ns->tx_bytes   = stats.tx_octets;
        ns->tx_packets = stats.tx_frames;
        ns->rx_bytes   = stats.rx_octets;
        ns->rx_packets = stats.rx_frames;
        ns->multicast  = stats.rx_mcast_frames;

        /* detailed rx_errors */
        ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
                               stats.rx_runt;
        ns->rx_over_errors   = 0;
        ns->rx_crc_errors    = stats.rx_fcs_err;
        ns->rx_frame_errors  = stats.rx_symbol_err;
        ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
                               stats.rx_ovflow2 + stats.rx_ovflow3 +
                               stats.rx_trunc0 + stats.rx_trunc1 +
                               stats.rx_trunc2 + stats.rx_trunc3;
        ns->rx_missed_errors = 0;

        /* detailed tx_errors */
        ns->tx_aborted_errors   = 0;
        ns->tx_carrier_errors   = 0;
        ns->tx_fifo_errors      = 0;
        ns->tx_heartbeat_errors = 0;
        ns->tx_window_errors    = 0;

        ns->tx_errors = stats.tx_error_frames;
        ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
                ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
        return ns;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
        unsigned int mbox;
        int ret = 0, prtad, devad;
        struct port_info *pi = netdev_priv(dev);
        struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

        switch (cmd) {
        case SIOCGMIIPHY:
                if (pi->mdio_addr < 0)
                        return -EOPNOTSUPP;
                data->phy_id = pi->mdio_addr;
                break;
        case SIOCGMIIREG:
        case SIOCSMIIREG:
                if (mdio_phy_id_is_c45(data->phy_id)) {
                        prtad = mdio_phy_id_prtad(data->phy_id);
                        devad = mdio_phy_id_devad(data->phy_id);
                } else if (data->phy_id < 32) {
                        prtad = data->phy_id;
                        devad = 0;
                        data->reg_num &= 0x1f;
                } else
                        return -EINVAL;

                mbox = pi->adapter->fn;
                if (cmd == SIOCGMIIREG)
                        ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
                                         data->reg_num, &data->val_out);
                else
                        ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
                                         data->reg_num, data->val_in);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return ret;
}

static void cxgb_set_rxmode(struct net_device *dev)
{
        /* unfortunately we can't return errors to the stack */
        set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
                return -EINVAL;
        ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
                            -1, -1, -1, true);
        if (!ret)
                dev->mtu = new_mtu;
        return ret;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
        int ret;
        struct sockaddr *addr = p;
        struct port_info *pi = netdev_priv(dev);

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
                            pi->xact_addr_filt, addr->sa_data, true, true);
        if (ret < 0)
                return ret;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        pi->xact_addr_filt = ret;
        return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;

        if (adap->flags & USING_MSIX) {
                int i;
                struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

                for (i = pi->nqsets; i; i--, rx++)
                        t4_sge_intr_msix(0, &rx->rspq);
        } else
                t4_intr_handler(adap)(0, adap);
}
#endif

static const struct net_device_ops cxgb4_netdev_ops = {
        .ndo_open             = cxgb_open,
        .ndo_stop             = cxgb_close,
        .ndo_start_xmit       = t4_eth_xmit,
        .ndo_get_stats64      = cxgb_get_stats,
        .ndo_set_rx_mode      = cxgb_set_rxmode,
        .ndo_set_mac_address  = cxgb_set_mac_addr,
        .ndo_set_features     = cxgb_set_features,
        .ndo_validate_addr    = eth_validate_addr,
        .ndo_do_ioctl         = cxgb_ioctl,
        .ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller  = cxgb_netpoll,
#endif
};

void t4_fatal_err(struct adapter *adap)
{
        t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
        t4_intr_disable(adap);
        dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}

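/*
 * Program the three fixed PCIe memory windows that give the driver access
 * to adapter memory.  Each window is positioned relative to BAR0 and its
 * size is encoded as log2 of the aperture in KB.
 */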
static void setup_memwin(struct adapter *adap)
{
        u32 bar0;

        bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
                     (bar0 + MEMWIN0_BASE) | BIR(0) |
                     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
                     (bar0 + MEMWIN1_BASE) | BIR(0) |
                     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
                     (bar0 + MEMWIN2_BASE) | BIR(0) |
                     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}

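/*
 * Program PCIe memory window 3 for RDMA on-chip queue memory, if any was
 * provisioned.  The trailing register read flushes the posted write before
 * the window is used.
 */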
static void setup_memwin_rdma(struct adapter *adap)
{
        if (adap->vres.ocq.size) {
                unsigned int start, sz_kb;

                start = pci_resource_start(adap->pdev, 2) +
                        OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
                sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
                t4_write_reg(adap,
                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
                             start | BIR(1) | WINDOW(ilog2(sz_kb)));
                t4_write_reg(adap,
                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
                             adap->vres.ocq.start);
                t4_read_reg(adap,
                            PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
        }
}

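/*
 * Legacy device initialization: query the device's capabilities, select
 * those we'll use, configure global RSS and per-PF resources, and apply
 * assorted TP/ULP register tweaks before the early firmware init.
 */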
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
        u32 v;
        int ret;

        /* get device capabilities */
        memset(c, 0, sizeof(*c));
        c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                               FW_CMD_REQUEST | FW_CMD_READ);
        c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
        ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
        if (ret < 0)
                return ret;

        /* select capabilities we'll be using */
        if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
                if (!vf_acls)
                        c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
                else
                        c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
        } else if (vf_acls) {
                dev_err(adap->pdev_dev,
                        "virtualization ACLs not supported\n");
                return -EINVAL;
        }
        c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                               FW_CMD_REQUEST | FW_CMD_WRITE);
        ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
        if (ret < 0)
                return ret;

        ret = t4_config_glbl_rss(adap, adap->fn,
                                 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
                                 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
                                 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
        if (ret < 0)
                return ret;

        ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
                          0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
        if (ret < 0)
                return ret;

        t4_sge_init(adap);

        /* tweak some settings */
        t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
        t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
        t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
        v = t4_read_reg(adap, TP_PIO_DATA);
        t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);

        /* first 4 Tx modulation queues point to consecutive Tx channels */
        adap->params.tp.tx_modq_map = 0xE4;
        t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
                     V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));

        /* associate each Tx modulation queue with consecutive Tx channels */
        v = 0x84218421;
        t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
                          &v, 1, A_TP_TX_SCHED_HDR);
        t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
                          &v, 1, A_TP_TX_SCHED_FIFO);
        t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
                          &v, 1, A_TP_TX_SCHED_PCMD);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
        if (is_offload(adap)) {
                t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
                             V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
                t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
                             V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
        }

        /* get basic stuff going */
        return t4_early_init(adap, adap->fn);
}

/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration.
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
        /*
         * Fix up various Host-Dependent Parameters like Page Size, Cache
         * Line Size, etc.  The firmware default is for a 4KB Page Size and
         * 64B Cache Line Size ...
         */
        t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

        /*
         * Process module parameters which affect early initialization.
         */
        if (rx_dma_offset != 2 && rx_dma_offset != 0) {
                dev_err(&adapter->pdev->dev,
                        "Ignoring illegal rx_dma_offset=%d, using 2\n",
                        rx_dma_offset);
                rx_dma_offset = 2;
        }
        t4_set_reg_field(adapter, SGE_CONTROL,
                         PKTSHIFT_MASK,
                         PKTSHIFT(rx_dma_offset));

        /*
         * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
         * adds the pseudo header itself.
         */
        t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
                               CSUM_HAS_PSEUDO_HDR, 0);

        return 0;
}

/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
        struct fw_caps_config_cmd caps_cmd;
        const struct firmware *cf;
        unsigned long mtype = 0, maddr = 0;
        u32 finiver, finicsum, cfcsum;
        int ret, using_flash;

        /*
         * Reset device if necessary.
         */
        if (reset) {
                ret = t4_fw_reset(adapter, adapter->mbox,
                                  PIORSTMODE | PIORST);
                if (ret < 0)
                        goto bye;
        }

        /*
         * If we have a T4 configuration file under /lib/firmware/cxgb4/,
         * then use that.  Otherwise, use the configuration file stored
         * in the adapter flash ...
         */
        ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
        if (ret < 0) {
                using_flash = 1;
                mtype = FW_MEMTYPE_CF_FLASH;
                maddr = t4_flash_cfg_addr(adapter);
        } else {
                u32 params[7], val[7];

                using_flash = 0;
                if (cf->size >= FLASH_CFG_MAX_SIZE)
                        ret = -ENOMEM;
                else {
                        params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
                             FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
                        ret = t4_query_params(adapter, adapter->mbox,
                                              adapter->fn, 0, 1, params, val);
                        if (ret == 0) {
                                /*
                                 * For t4_memory_write() below addresses and
                                 * sizes have to be in terms of multiples of 4
                                 * bytes.  So, if the Configuration File isn't
                                 * a multiple of 4 bytes in length we'll have
                                 * to write that out separately since we can't
                                 * guarantee that the bytes following the
                                 * residual byte in the buffer returned by
                                 * request_firmware() are zeroed out ...
                                 */
                                size_t resid = cf->size & 0x3;
                                size_t size = cf->size & ~0x3;
                                __be32 *data = (__be32 *)cf->data;

                                mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
                                maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;

                                ret = t4_memory_write(adapter, mtype, maddr,
                                                      size, data);
                                if (ret == 0 && resid != 0) {
                                        union {
                                                __be32 word;
                                                char buf[4];
                                        } last;
                                        int i;

                                        last.word = data[size >> 2];
                                        for (i = resid; i < 4; i++)
                                                last.buf[i] = 0;
                                        ret = t4_memory_write(adapter, mtype,
                                                              maddr + size,
                                                              4, &last.word);
                                }
                        }
                }

                release_firmware(cf);
                if (ret)
                        goto bye;
        }

        /*
         * Issue a Capability Configuration command to the firmware to get it
         * to parse the Configuration File.  We don't use t4_fw_config_file()
         * because we want the ability to modify various features after we've
         * processed the configuration file ...
         */
        memset(&caps_cmd, 0, sizeof(caps_cmd));
        caps_cmd.op_to_write =
                htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                      FW_CMD_REQUEST |
                      FW_CMD_READ);
        caps_cmd.cfvalid_to_len16 =
                htonl(FW_CAPS_CONFIG_CMD_CFVALID |
                      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
                      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
                      FW_LEN16(caps_cmd));
        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
                         &caps_cmd);
        if (ret < 0)
                goto bye;

        finiver = ntohl(caps_cmd.finiver);
        finicsum = ntohl(caps_cmd.finicsum);
        cfcsum = ntohl(caps_cmd.cfcsum);
        if (finicsum != cfcsum)
                dev_warn(adapter->pdev_dev, "Configuration File checksum "\
                         "mismatch: [fini] csum=%#x, computed csum=%#x\n",
                         finicsum, cfcsum);

        /*
         * And now tell the firmware to use the configuration we just loaded.
         */
        caps_cmd.op_to_write =
                htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                      FW_CMD_REQUEST |
                      FW_CMD_WRITE);
        caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
                         NULL);
        if (ret < 0)
                goto bye;

        /*
         * Tweak configuration based on system architecture, module
         * parameters, etc.
         */
        ret = adap_init0_tweaks(adapter);
        if (ret < 0)
                goto bye;

        /*
         * And finally tell the firmware to initialize itself using the
         * parameters from the Configuration File.
         */
        ret = t4_fw_initialize(adapter, adapter->mbox);
        if (ret < 0)
                goto bye;

        /*
         * Return successfully and note that we're operating with parameters
         * supplied by the Configuration File rather than hard-wired
         * initialization constants buried in the driver.
         */
        adapter->flags |= USING_SOFT_PARAMS;
        dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
                 "Configuration File %s, version %#x, computed checksum %#x\n",
                 (using_flash
                  ? "in device FLASH"
                  : "/lib/firmware/" FW_CFNAME),
                 finiver, cfcsum);
        return 0;

        /*
         * Something bad happened.  Return the error ...  (If the "error"
         * is that there's no Configuration File on the adapter we don't
         * want to issue a warning since this is fairly common.)
         */
bye:
        if (ret != -ENOENT)
                dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
                         -ret);
        return ret;
}

/*
 * Attempt to initialize the adapter via hard-coded, driver supplied
 * parameters ...
 */
static int adap_init0_no_config(struct adapter *adapter, int reset)
{
        struct sge *s = &adapter->sge;
        struct fw_caps_config_cmd caps_cmd;
        u32 v;
        int i, ret;

        /*
         * Reset device if necessary
         */
        if (reset) {
                ret = t4_fw_reset(adapter, adapter->mbox,
                                  PIORSTMODE | PIORST);
                if (ret < 0)
                        goto bye;
        }

        /*
         * Get device capabilities and select which we'll be using.
         */
        memset(&caps_cmd, 0, sizeof(caps_cmd));
        caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                                     FW_CMD_REQUEST | FW_CMD_READ);
        caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
                         &caps_cmd);
        if (ret < 0)
                goto bye;

        if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
                if (!vf_acls)
                        caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
                else
                        caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
        } else if (vf_acls) {
                dev_err(adapter->pdev_dev,
                        "virtualization ACLs not supported\n");
                ret = -EINVAL;
                goto bye;
        }
        caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
                              FW_CMD_REQUEST | FW_CMD_WRITE);
        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
                         NULL);
        if (ret < 0)
                goto bye;

        /*
         * Tweak configuration based on system architecture, module
         * parameters, etc.
         */
        ret = adap_init0_tweaks(adapter);
        if (ret < 0)
                goto bye;

        /*
         * Select RSS Global Mode we want to use.  We use "Basic Virtual"
         * mode which maps each Virtual Interface to its own section of
         * the RSS Table and we turn on all map and hash enables ...
         */
        adapter->flags |= RSS_TNLALLLOOKUP;
        ret = t4_config_glbl_rss(adapter, adapter->mbox,
                                 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
                                 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
                                 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
                                 ((adapter->flags & RSS_TNLALLLOOKUP) ?
                                        FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
        if (ret < 0)
                goto bye;

        /*
         * Set up our own fundamental resource provisioning ...
         */
        ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
                          PFRES_NEQ, PFRES_NETHCTRL,
                          PFRES_NIQFLINT, PFRES_NIQ,
                          PFRES_TC, PFRES_NVI,
                          FW_PFVF_CMD_CMASK_MASK,
                          pfvfres_pmask(adapter, adapter->fn, 0),
                          PFRES_NEXACTF,
                          PFRES_R_CAPS, PFRES_WX_CAPS);
        if (ret < 0)
                goto bye;

        /*
         * Perform low level SGE initialization.  We need to do this before we
         * send the firmware the INITIALIZE command because that will cause
         * any other PF Drivers which are waiting for the Master
         * Initialization to proceed forward.
         */
        for (i = 0; i < SGE_NTIMERS - 1; i++)
                s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
        s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
        s->counter_val[0] = 1;
        for (i = 1; i < SGE_NCOUNTERS; i++)
                s->counter_val[i] = min(intr_cnt[i - 1],
                                        THRESHOLD_0_GET(THRESHOLD_0_MASK));
        t4_sge_init(adapter);

#ifdef CONFIG_PCI_IOV
        /*
         * Provision resource limits for Virtual Functions.  We currently
         * grant them all the same static resource limits except for the Port
         * Access Rights Mask which we're assigning based on the PF.  All of
         * the static provisioning stuff for both the PF and VF really needs
         * to be managed in a persistent manner for each device which the
         * firmware controls.
         */
        {
                int pf, vf;

                for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
                        if (num_vf[pf] <= 0)
                                continue;

                        /* VF numbering starts at 1! */
                        for (vf = 1; vf <= num_vf[pf]; vf++) {
                                ret = t4_cfg_pfvf(adapter, adapter->mbox,
                                                  pf, vf,
                                                  VFRES_NEQ, VFRES_NETHCTRL,
                                                  VFRES_NIQFLINT, VFRES_NIQ,
                                                  VFRES_TC, VFRES_NVI,
                                                  FW_PFVF_CMD_CMASK_MASK,
                                                  pfvfres_pmask(
                                                  adapter, pf, vf),
                                                  VFRES_NEXACTF,
                                                  VFRES_R_CAPS, VFRES_WX_CAPS);
                                if (ret < 0)
                                        dev_warn(adapter->pdev_dev,
                                                 "failed to "\
                                                 "provision pf/vf=%d/%d; "
                                                 "err=%d\n", pf, vf, ret);
                        }
                }
        }
#endif

        /*
         * Set up the default filter mode.  Later we'll want to implement this
         * via a firmware command, etc. ...  This needs to be done before the
         * firmware initialization command ...  If the selected set of fields
         * isn't equal to the default value, we'll need to make sure that the
         * field selections will fit in the 36-bit budget.
         */
        if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
                int j, bits = 0;

                for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
                        switch (tp_vlan_pri_map & (1 << j)) {
                        case 0:
                                /* compressed filter field not enabled */
                                break;
                        case FCOE_MASK:
                                bits +=  1;
                                break;
                        case PORT_MASK:
                                bits +=  3;
                                break;
                        case VNIC_ID_MASK:
                                bits += 17;
                                break;
                        case VLAN_MASK:
                                bits += 17;
                                break;
                        case TOS_MASK:
                                bits +=  8;
                                break;
                        case PROTOCOL_MASK:
                                bits +=  8;
                                break;
                        case ETHERTYPE_MASK:
                                bits += 16;
                                break;
                        case MACMATCH_MASK:
                                bits +=  9;
                                break;
                        case MPSHITTYPE_MASK:
                                bits +=  3;
                                break;
                        case FRAGMENTATION_MASK:
                                bits +=  1;
                                break;
                        }

                if (bits > 36) {
                        dev_err(adapter->pdev_dev,
                                "tp_vlan_pri_map=%#x needs %d bits > 36;"\
                                " using %#x\n", tp_vlan_pri_map, bits,
                                TP_VLAN_PRI_MAP_DEFAULT);
                        tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
                }
        }
        v = tp_vlan_pri_map;
        t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
                          &v, 1, TP_VLAN_PRI_MAP);

        /*
         * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in
         * order to support any of the compressed filter fields above.  Newer
         * versions of the firmware do this automatically but it doesn't hurt
         * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
         * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
         * since the firmware automatically turns this on and off when we have
         * a non-zero number of filters active (since it does have a
         * performance impact).
         */
        if (tp_vlan_pri_map)
                t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
                                 FIVETUPLELOOKUP_MASK,
                                 FIVETUPLELOOKUP_MASK);

        /*
         * Tweak some settings.
         */
        t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
                     RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
                     PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
                     KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));

        /*
         * Get basic stuff going by issuing the Firmware Initialize command.
         * Note that this _must_ be after all PFVF commands ...
         */
        ret = t4_fw_initialize(adapter, adapter->mbox);
        if (ret < 0)
                goto bye;

        /*
         * Return successfully!
         */
        dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
                 "driver parameters\n");
        return 0;

        /*
         * Something bad happened.  Return the error ...
         */
bye:
        return ret;
}

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
        int ret;
        u32 v, port_vec;
        enum dev_state state;
        u32 params[7], val[7];
        struct fw_caps_config_cmd caps_cmd;
        int reset = 1, j;

        /*
         * Contact FW, advertising Master capability (and potentially forcing
         * ourselves as the Master PF if our module parameter force_init is
         * set).
         */
        ret = t4_fw_hello(adap, adap->mbox, adap->fn,
                          force_init ? MASTER_MUST : MASTER_MAY,
                          &state);
        if (ret < 0) {
                dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
                        ret);
                return ret;
        }
        if (ret == adap->mbox)
                adap->flags |= MASTER_PF;
        if (force_init && state == DEV_STATE_INIT)
                state = DEV_STATE_UNINIT;

        /*
         * If we're the Master PF Driver and the device is uninitialized,
         * then let's consider upgrading the firmware ...  (We always want
         * to check the firmware version number in order to A. get it for
         * later reporting and B. to warn if the currently loaded firmware
         * is excessively mismatched relative to the driver.)
         */
        ret = t4_check_fw_version(adap);
        if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
                if (ret == -EINVAL || ret > 0) {
                        if (upgrade_fw(adap) >= 0) {
                                /*
                                 * Note that the chip was reset as part of the
                                 * firmware upgrade so we don't reset it again
                                 * below and grab the new firmware version.
                                 */
                                reset = 0;
                                ret = t4_check_fw_version(adap);
                        }
                }
                if (ret < 0)
                        return ret;
        }

        /*
         * Grab VPD parameters.  This should be done after we establish a
         * connection to the firmware since some of the VPD parameters
         * (notably the Core Clock frequency) are retrieved via requests to
         * the firmware.  On the other hand, we need these fairly early on
         * so we do this right after getting ahold of the firmware.
         */
        ret = get_vpd_params(adap, &adap->params.vpd);
        if (ret < 0)
                goto bye;

        /*
         * Find out what ports are available to us.  Note that we need to do
         * this before calling adap_init0_no_config() since it needs nports
         * and portvec ...
         */
        v =
            FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
        ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
        if (ret < 0)
                goto bye;

        adap->params.nports = hweight32(port_vec);
        adap->params.portvec = port_vec;

        /*
         * If the firmware is initialized already (and we're not forcing a
         * master initialization), note that we're living with existing
         * adapter parameters.  Otherwise, it's time to try initializing the
         * adapter ...
         */
        if (state == DEV_STATE_INIT) {
                dev_info(adap->pdev_dev, "Coming up as %s: "\
                         "Adapter already initialized\n",
                         adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
                adap->flags |= USING_SOFT_PARAMS;
        } else {
                dev_info(adap->pdev_dev, "Coming up as MASTER: "\
                         "Initializing adapter\n");

                /*
                 * If the firmware doesn't support Configuration
                 * Files, warn the user.
                 */
4248                 if (ret < 0)
4249                         dev_warn(adap->pdev_dev, "Firmware doesn't support "
4250                                  "configuration file.\n");
4251                 if (force_old_init)
4252                         ret = adap_init0_no_config(adap, reset);
4253                 else {
4254                         /*
4255                          * Find out whether we're dealing with a version of
4256                          * the firmware which has configuration file support.
4257                          */
4258                         params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4259                                      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4260                         ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
4261                                               params, val);
4262
4263                         /*
4264                          * If the firmware doesn't support Configuration
4265                          * Files, use the old Driver-based, hard-wired
4266                          * initialization.  Otherwise, try using the
4267                          * Configuration File support and fall back to the
4268                          * Driver-based initialization if there's no
4269                          * Configuration File found.
4270                          */
4271                         if (ret < 0)
4272                                 ret = adap_init0_no_config(adap, reset);
4273                         else {
4274                                 /*
4275                                  * The firmware provides us with a memory
4276                                  * buffer where we can load a Configuration
4277                                  * File from the host if we want to override
4278                                  * the Configuration File in flash.
4279                                  */
4280
4281                                 ret = adap_init0_config(adap, reset);
4282                                 if (ret == -ENOENT) {
4283                                         dev_info(adap->pdev_dev,
4284                                             "No Configuration File present "
4285                                             "on adapter.  Using hard-wired "
4286                                             "configuration parameters.\n");
4287                                         ret = adap_init0_no_config(adap, reset);
4288                                 }
4289                         }
4290                 }
4291                 if (ret < 0) {
4292                         dev_err(adap->pdev_dev,
4293                                 "could not initialize adapter, error %d\n",
4294                                 -ret);
4295                         goto bye;
4296                 }
4297         }
4298
4299         /*
4300          * If we're living with non-hard-coded parameters (either from a
4301          * Firmware Configuration File or values programmed by a different PF
4302          * Driver), give the SGE code a chance to pull in anything that it
4303          * needs ...  Note that this must be called after we retrieve our VPD
4304          * parameters in order to know how to convert core ticks to seconds.
4305          */
4306         if (adap->flags & USING_SOFT_PARAMS) {
4307                 ret = t4_sge_init(adap);
4308                 if (ret < 0)
4309                         goto bye;
4310         }
4311
4312         if (is_bypass_device(adap->pdev->device))
4313                 adap->params.bypass = 1;
4314
4315         /*
4316          * Grab some of our basic fundamental operating parameters.
4317          */
4318 #define FW_PARAM_DEV(param) \
4319         (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
4320         FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
4321
#define FW_PARAM_PFVF(param) \
        (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
        FW_PARAMS_PARAM_Y(0) | \
        FW_PARAMS_PARAM_Z(0))
4327
4328         params[0] = FW_PARAM_PFVF(EQ_START);
4329         params[1] = FW_PARAM_PFVF(L2T_START);
4330         params[2] = FW_PARAM_PFVF(L2T_END);
4331         params[3] = FW_PARAM_PFVF(FILTER_START);
4332         params[4] = FW_PARAM_PFVF(FILTER_END);
4333         params[5] = FW_PARAM_PFVF(IQFLINT_START);
4334         ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
4335         if (ret < 0)
4336                 goto bye;
4337         adap->sge.egr_start = val[0];
4338         adap->l2t_start = val[1];
4339         adap->l2t_end = val[2];
4340         adap->tids.ftid_base = val[3];
4341         adap->tids.nftids = val[4] - val[3] + 1;
4342         adap->sge.ingr_start = val[5];
4343
4344         /* query params related to active filter region */
4345         params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
4346         params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
4347         ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
        /* If the active filter region is non-empty, enable establishing
         * offload connections through firmware work requests.
         */
        if (ret >= 0 && val[0] != val[1]) {
4352                 adap->flags |= FW_OFLD_CONN;
4353                 adap->tids.aftid_base = val[0];
4354                 adap->tids.aftid_end = val[1];
4355         }
4356
4357         /*
4358          * Get device capabilities so we can determine what resources we need
4359          * to manage.
4360          */
4361         memset(&caps_cmd, 0, sizeof(caps_cmd));
4362         caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4363                                      FW_CMD_REQUEST | FW_CMD_READ);
4364         caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4365         ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
4366                          &caps_cmd);
4367         if (ret < 0)
4368                 goto bye;
4369
4370         if (caps_cmd.ofldcaps) {
4371                 /* query offload-related parameters */
4372                 params[0] = FW_PARAM_DEV(NTID);
4373                 params[1] = FW_PARAM_PFVF(SERVER_START);
4374                 params[2] = FW_PARAM_PFVF(SERVER_END);
4375                 params[3] = FW_PARAM_PFVF(TDDP_START);
4376                 params[4] = FW_PARAM_PFVF(TDDP_END);
4377                 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
4378                 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
4379                                       params, val);
4380                 if (ret < 0)
4381                         goto bye;
4382                 adap->tids.ntids = val[0];
4383                 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
4384                 adap->tids.stid_base = val[1];
4385                 adap->tids.nstids = val[2] - val[1] + 1;
                /*
                 * Set up the server filter region.  Divide the available
                 * filter region into two parts: regular filters get the
                 * first 1/3rd and server filters get the remaining 2/3rds.
                 * This split is only done when the firmware offload
                 * connection workaround path (FW_OFLD_CONN) is enabled.
                 * 1. Regular filters.
                 * 2. Server filters: special filters used to redirect SYN
                 *    packets to the offload queue.
                 */
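                /*
                 * Worked example (illustrative numbers only): if nftids
                 * were 300, sftid_base would become ftid_base + 100,
                 * nsftids would become 200, and nftids would shrink
                 * to 100.
                 */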
4395                 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
4396                         adap->tids.sftid_base = adap->tids.ftid_base +
4397                                         DIV_ROUND_UP(adap->tids.nftids, 3);
4398                         adap->tids.nsftids = adap->tids.nftids -
4399                                          DIV_ROUND_UP(adap->tids.nftids, 3);
4400                         adap->tids.nftids = adap->tids.sftid_base -
4401                                                 adap->tids.ftid_base;
4402                 }
4403                 adap->vres.ddp.start = val[3];
4404                 adap->vres.ddp.size = val[4] - val[3] + 1;
4405                 adap->params.ofldq_wr_cred = val[5];
4406
4407                 adap->params.offload = 1;
4408         }
4409         if (caps_cmd.rdmacaps) {
4410                 params[0] = FW_PARAM_PFVF(STAG_START);
4411                 params[1] = FW_PARAM_PFVF(STAG_END);
4412                 params[2] = FW_PARAM_PFVF(RQ_START);
4413                 params[3] = FW_PARAM_PFVF(RQ_END);
4414                 params[4] = FW_PARAM_PFVF(PBL_START);
4415                 params[5] = FW_PARAM_PFVF(PBL_END);
4416                 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
4417                                       params, val);
4418                 if (ret < 0)
4419                         goto bye;
4420                 adap->vres.stag.start = val[0];
4421                 adap->vres.stag.size = val[1] - val[0] + 1;
4422                 adap->vres.rq.start = val[2];
4423                 adap->vres.rq.size = val[3] - val[2] + 1;
4424                 adap->vres.pbl.start = val[4];
4425                 adap->vres.pbl.size = val[5] - val[4] + 1;
4426
4427                 params[0] = FW_PARAM_PFVF(SQRQ_START);
4428                 params[1] = FW_PARAM_PFVF(SQRQ_END);
4429                 params[2] = FW_PARAM_PFVF(CQ_START);
4430                 params[3] = FW_PARAM_PFVF(CQ_END);
4431                 params[4] = FW_PARAM_PFVF(OCQ_START);
4432                 params[5] = FW_PARAM_PFVF(OCQ_END);
                ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
                                      params, val);
4434                 if (ret < 0)
4435                         goto bye;
4436                 adap->vres.qp.start = val[0];
4437                 adap->vres.qp.size = val[1] - val[0] + 1;
4438                 adap->vres.cq.start = val[2];
4439                 adap->vres.cq.size = val[3] - val[2] + 1;
4440                 adap->vres.ocq.start = val[4];
4441                 adap->vres.ocq.size = val[5] - val[4] + 1;
4442         }
4443         if (caps_cmd.iscsicaps) {
4444                 params[0] = FW_PARAM_PFVF(ISCSI_START);
4445                 params[1] = FW_PARAM_PFVF(ISCSI_END);
4446                 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
4447                                       params, val);
4448                 if (ret < 0)
4449                         goto bye;
4450                 adap->vres.iscsi.start = val[0];
4451                 adap->vres.iscsi.size = val[1] - val[0] + 1;
4452         }
4453 #undef FW_PARAM_PFVF
4454 #undef FW_PARAM_DEV
4455
        /*
         * These are finalized by FW initialization, so load their values now.
         */
4459         v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
4460         adap->params.tp.tre = TIMERRESOLUTION_GET(v);
4461         adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
4462         t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
4463         t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4464                      adap->params.b_wnd);
4465
4466         /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
4467         for (j = 0; j < NCHAN; j++)
4468                 adap->params.tp.tx_modq[j] = j;
4469
4470         t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4471                          &adap->filter_mode, 1,
4472                          TP_VLAN_PRI_MAP);
4473
4474         adap->flags |= FW_OK;
4475         return 0;
4476
        /*
         * Something bad happened.  If a command timed out or failed with
         * EIO, the firmware is either not operating within its spec or
         * something catastrophic happened to the HW/FW, so stop issuing
         * commands.
         */
4482 bye:
4483         if (ret != -ETIMEDOUT && ret != -EIO)
4484                 t4_fw_bye(adap, adap->mbox);
4485         return ret;
4486 }
4487
4488 /* EEH callbacks */
4489
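/*
 * error_detected callback: a PCI channel error has been reported, so
 * quiesce the adapter.  Detach and mark down all net devices, tear down
 * the queues if the adapter was fully initialized, and tell the EEH core
 * whether a slot reset is worth attempting.
 */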
4490 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
4491                                          pci_channel_state_t state)
4492 {
4493         int i;
4494         struct adapter *adap = pci_get_drvdata(pdev);
4495
4496         if (!adap)
4497                 goto out;
4498
4499         rtnl_lock();
4500         adap->flags &= ~FW_OK;
4501         notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
4502         for_each_port(adap, i) {
4503                 struct net_device *dev = adap->port[i];
4504
4505                 netif_device_detach(dev);
4506                 netif_carrier_off(dev);
4507         }
4508         if (adap->flags & FULL_INIT_DONE)
4509                 cxgb_down(adap);
4510         rtnl_unlock();
4511         pci_disable_device(pdev);
4512 out:    return state == pci_channel_io_perm_failure ?
4513                 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
4514 }
4515
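/*
 * slot_reset callback: the slot has been reset, so re-enable the device,
 * re-establish contact with the firmware, and rebuild enough state
 * (virtual interfaces, MTU tables, memory windows, queues) to let
 * traffic resume.
 */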
4516 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
4517 {
4518         int i, ret;
4519         struct fw_caps_config_cmd c;
4520         struct adapter *adap = pci_get_drvdata(pdev);
4521
4522         if (!adap) {
4523                 pci_restore_state(pdev);
4524                 pci_save_state(pdev);
4525                 return PCI_ERS_RESULT_RECOVERED;
4526         }
4527
4528         if (pci_enable_device(pdev)) {
4529                 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
4530                 return PCI_ERS_RESULT_DISCONNECT;
4531         }
4532
4533         pci_set_master(pdev);
4534         pci_restore_state(pdev);
4535         pci_save_state(pdev);
4536         pci_cleanup_aer_uncorrect_error_status(pdev);
4537
4538         if (t4_wait_dev_ready(adap) < 0)
4539                 return PCI_ERS_RESULT_DISCONNECT;
4540         if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
4541                 return PCI_ERS_RESULT_DISCONNECT;
4542         adap->flags |= FW_OK;
4543         if (adap_init1(adap, &c))
4544                 return PCI_ERS_RESULT_DISCONNECT;
4545
4546         for_each_port(adap, i) {
4547                 struct port_info *p = adap2pinfo(adap, i);
4548
4549                 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
4550                                   NULL, NULL);
4551                 if (ret < 0)
4552                         return PCI_ERS_RESULT_DISCONNECT;
4553                 p->viid = ret;
4554                 p->xact_addr_filt = -1;
4555         }
4556
4557         t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
4558                      adap->params.b_wnd);
4559         setup_memwin(adap);
4560         if (cxgb_up(adap))
4561                 return PCI_ERS_RESULT_DISCONNECT;
4562         return PCI_ERS_RESULT_RECOVERED;
4563 }
4564
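/*
 * resume callback: recovery has completed, so restart any ports that
 * were running and re-attach their net devices.
 */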
4565 static void eeh_resume(struct pci_dev *pdev)
4566 {
4567         int i;
4568         struct adapter *adap = pci_get_drvdata(pdev);
4569
4570         if (!adap)
4571                 return;
4572
4573         rtnl_lock();
4574         for_each_port(adap, i) {
4575                 struct net_device *dev = adap->port[i];
4576
4577                 if (netif_running(dev)) {
4578                         link_start(dev);
4579                         cxgb_set_rxmode(dev);
4580                 }
4581                 netif_device_attach(dev);
4582         }
4583         rtnl_unlock();
4584 }
4585
4586 static const struct pci_error_handlers cxgb4_eeh = {
4587         .error_detected = eeh_err_detected,
4588         .slot_reset     = eeh_slot_reset,
4589         .resume         = eeh_resume,
4590 };
4591
4592 static inline bool is_10g_port(const struct link_config *lc)
4593 {
4594         return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
4595 }
4596
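/*
 * Initialize a response queue's interrupt holdoff parameters and sizes.
 * Passing a packet-count index >= SGE_NCOUNTERS disables count-based
 * interrupt coalescing for the queue.
 */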
4597 static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
4598                              unsigned int size, unsigned int iqe_size)
4599 {
4600         q->intr_params = QINTR_TIMER_IDX(timer_idx) |
4601                          (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
4602         q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
4603         q->iqe_len = iqe_size;
4604         q->size = size;
4605 }
4606
4607 /*
4608  * Perform default configuration of DMA queues depending on the number and type
4609  * of ports we found and the number of available CPUs.  Most settings can be
4610  * modified by the admin prior to actual use.
4611  */
4612 static void cfg_queues(struct adapter *adap)
4613 {
4614         struct sge *s = &adap->sge;
4615         int i, q10g = 0, n10g = 0, qidx = 0;
4616
4617         for_each_port(adap, i)
4618                 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
4619
        /*
         * We default to 1 queue per non-10G port and up to the default
         * number of RSS queues per 10G port.
         */
4624         if (n10g)
4625                 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
4626         if (q10g > netif_get_num_default_rss_queues())
4627                 q10g = netif_get_num_default_rss_queues();
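        /*
         * Illustrative example: with two 10G and two 1G ports, each 10G
         * port gets (MAX_ETH_QSETS - 2) / 2 queue sets, capped at the
         * default RSS queue count, while each 1G port gets a single one.
         */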
4628
4629         for_each_port(adap, i) {
4630                 struct port_info *pi = adap2pinfo(adap, i);
4631
4632                 pi->first_qset = qidx;
4633                 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
4634                 qidx += pi->nqsets;
4635         }
4636
4637         s->ethqsets = qidx;
4638         s->max_ethqsets = qidx;   /* MSI-X may lower it later */
4639
4640         if (is_offload(adap)) {
4641                 /*
                 * For offload we use 1 queue/channel if all ports are up to
                 * 1G; otherwise we divide all available queues amongst the
                 * channels, capped by the number of available cores.
4645                  */
4646                 if (n10g) {
4647                         i = min_t(int, ARRAY_SIZE(s->ofldrxq),
4648                                   num_online_cpus());
4649                         s->ofldqsets = roundup(i, adap->params.nports);
4650                 } else
4651                         s->ofldqsets = adap->params.nports;
4652                 /* For RDMA one Rx queue per channel suffices */
4653                 s->rdmaqs = adap->params.nports;
4654         }
4655
4656         for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
4657                 struct sge_eth_rxq *r = &s->ethrxq[i];
4658
4659                 init_rspq(&r->rspq, 0, 0, 1024, 64);
4660                 r->fl.size = 72;
4661         }
4662
4663         for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
4664                 s->ethtxq[i].q.size = 1024;
4665
4666         for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
4667                 s->ctrlq[i].q.size = 512;
4668
4669         for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
4670                 s->ofldtxq[i].q.size = 1024;
4671
4672         for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
4673                 struct sge_ofld_rxq *r = &s->ofldrxq[i];
4674
4675                 init_rspq(&r->rspq, 0, 0, 1024, 64);
4676                 r->rspq.uld = CXGB4_ULD_ISCSI;
4677                 r->fl.size = 72;
4678         }
4679
4680         for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
4681                 struct sge_ofld_rxq *r = &s->rdmarxq[i];
4682
4683                 init_rspq(&r->rspq, 0, 0, 511, 64);
4684                 r->rspq.uld = CXGB4_ULD_RDMA;
4685                 r->fl.size = 72;
4686         }
4687
4688         init_rspq(&s->fw_evtq, 6, 0, 512, 64);
4689         init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
4690 }
4691
4692 /*
4693  * Reduce the number of Ethernet queues across all ports to at most n.
 * n is assumed to be large enough to leave every port at least one queue.
4695  */
4696 static void reduce_ethqs(struct adapter *adap, int n)
4697 {
4698         int i;
4699         struct port_info *pi;
4700
4701         while (n < adap->sge.ethqsets)
4702                 for_each_port(adap, i) {
4703                         pi = adap2pinfo(adap, i);
4704                         if (pi->nqsets > 1) {
4705                                 pi->nqsets--;
4706                                 adap->sge.ethqsets--;
4707                                 if (adap->sge.ethqsets <= n)
4708                                         break;
4709                         }
4710                 }
4711
4712         n = 0;
4713         for_each_port(adap, i) {
4714                 pi = adap2pinfo(adap, i);
4715                 pi->first_qset = n;
4716                 n += pi->nqsets;
4717         }
4718 }
4719
4720 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
4721 #define EXTRA_VECS 2
4722
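/*
 * Try to enable MSI-X: ask for one vector per Ethernet queue set plus
 * EXTRA_VECS (plus the offload and RDMA queues when offload is enabled),
 * but settle for fewer, down to one vector per port plus the extra and
 * minimum offload vectors.
 */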
4723 static int enable_msix(struct adapter *adap)
4724 {
4725         int ofld_need = 0;
4726         int i, err, want, need;
4727         struct sge *s = &adap->sge;
4728         unsigned int nchan = adap->params.nports;
4729         struct msix_entry entries[MAX_INGQ + 1];
4730
4731         for (i = 0; i < ARRAY_SIZE(entries); ++i)
4732                 entries[i].entry = i;
4733
4734         want = s->max_ethqsets + EXTRA_VECS;
4735         if (is_offload(adap)) {
4736                 want += s->rdmaqs + s->ofldqsets;
                /* need nchan vectors for each of the two possible ULDs */
4738                 ofld_need = 2 * nchan;
4739         }
4740         need = adap->params.nports + EXTRA_VECS + ofld_need;
4741
4742         while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
4743                 want = err;
4744
4745         if (!err) {
4746                 /*
4747                  * Distribute available vectors to the various queue groups.
4748                  * Every group gets its minimum requirement and NIC gets top
4749                  * priority for leftovers.
4750                  */
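                /*
                 * Illustrative example: a two-port offload adapter granted
                 * 12 vectors keeps 2 for EXTRA_VECS, assigns 6 to Ethernet
                 * queue sets (12 - EXTRA_VECS - ofld_need), and splits the
                 * remaining 4 between the offload and RDMA queue groups.
                 */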
4751                 i = want - EXTRA_VECS - ofld_need;
4752                 if (i < s->max_ethqsets) {
4753                         s->max_ethqsets = i;
4754                         if (i < s->ethqsets)
4755                                 reduce_ethqs(adap, i);
4756                 }
4757                 if (is_offload(adap)) {
4758                         i = want - EXTRA_VECS - s->max_ethqsets;
4759                         i -= ofld_need - nchan;
4760                         s->ofldqsets = (i / nchan) * nchan;  /* round down */
4761                 }
4762                 for (i = 0; i < want; ++i)
4763                         adap->msix_info[i].vec = entries[i].vector;
4764         } else if (err > 0)
4765                 dev_info(adap->pdev_dev,
4766                          "only %d MSI-X vectors left, not using MSI-X\n", err);
4767         return err;
4768 }
4769
4770 #undef EXTRA_VECS
4771
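/*
 * Allocate each port's RSS indirection table and fill it with the
 * ethtool default distribution over that port's Rx queue sets.
 */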
4772 static int init_rss(struct adapter *adap)
4773 {
4774         unsigned int i, j;
4775
4776         for_each_port(adap, i) {
4777                 struct port_info *pi = adap2pinfo(adap, i);
4778
4779                 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
4780                 if (!pi->rss)
4781                         return -ENOMEM;
4782                 for (j = 0; j < pi->rss_size; j++)
4783                         pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
4784         }
4785         return 0;
4786 }
4787
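/*
 * Print a one-line description of an adapter port: supported link
 * speeds, media type, PCIe link parameters and the interrupt mode in
 * use, followed by the serial number and engineering change level.
 */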
4788 static void print_port_info(const struct net_device *dev)
4789 {
4790         static const char *base[] = {
4791                 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
4792                 "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
4793         };
4794
4795         char buf[80];
4796         char *bufp = buf;
4797         const char *spd = "";
4798         const struct port_info *pi = netdev_priv(dev);
4799         const struct adapter *adap = pi->adapter;
4800
4801         if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
4802                 spd = " 2.5 GT/s";
4803         else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
4804                 spd = " 5 GT/s";
4805
4806         if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
4807                 bufp += sprintf(bufp, "100/");
4808         if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
4809                 bufp += sprintf(bufp, "1000/");
4810         if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
4811                 bufp += sprintf(bufp, "10G/");
4812         if (bufp != buf)
4813                 --bufp;
4814         sprintf(bufp, "BASE-%s", base[pi->port_type]);
4815
4816         netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
4817                     adap->params.vpd.id, adap->params.rev, buf,
4818                     is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
4819                     (adap->flags & USING_MSIX) ? " MSI-X" :
4820                     (adap->flags & USING_MSI) ? " MSI" : "");
4821         netdev_info(dev, "S/N: %s, E/C: %s\n",
4822                     adap->params.vpd.sn, adap->params.vpd.ec);
4823 }
4824
4825 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
4826 {
4827         pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
4828 }
4829
4830 /*
4831  * Free the following resources:
4832  * - memory used for tables
4833  * - MSI/MSI-X
4834  * - net devices
4835  * - resources FW is holding for us
4836  */
4837 static void free_some_resources(struct adapter *adapter)
4838 {
4839         unsigned int i;
4840
4841         t4_free_mem(adapter->l2t);
4842         t4_free_mem(adapter->tids.tid_tab);
4843         disable_msi(adapter);
4844
4845         for_each_port(adapter, i)
4846                 if (adapter->port[i]) {
4847                         kfree(adap2pinfo(adapter, i)->rss);
4848                         free_netdev(adapter->port[i]);
4849                 }
4850         if (adapter->flags & FW_OK)
4851                 t4_fw_bye(adapter, adapter->fn);
4852 }
4853
4854 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
4855 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
4856                    NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
4857
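/*
 * PCI probe routine: bring up one adapter.  Only the physical function
 * named in the PCI device table's driver_data attaches fully; all other
 * functions skip ahead to the SR-IOV provisioning at the bottom.
 */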
4858 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4859 {
4860         int func, i, err;
4861         struct port_info *pi;
4862         bool highdma = false;
4863         struct adapter *adapter = NULL;
4864
4865         printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
4866
4867         err = pci_request_regions(pdev, KBUILD_MODNAME);
4868         if (err) {
4869                 /* Just info, some other driver may have claimed the device. */
4870                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
4871                 return err;
4872         }
4873
4874         /* We control everything through one PF */
4875         func = PCI_FUNC(pdev->devfn);
4876         if (func != ent->driver_data) {
4877                 pci_save_state(pdev);        /* to restore SR-IOV later */
4878                 goto sriov;
4879         }
4880
4881         err = pci_enable_device(pdev);
4882         if (err) {
4883                 dev_err(&pdev->dev, "cannot enable PCI device\n");
4884                 goto out_release_regions;
4885         }
4886
4887         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4888                 highdma = true;
4889                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4890                 if (err) {
4891                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
4892                                 "coherent allocations\n");
4893                         goto out_disable_device;
4894                 }
4895         } else {
4896                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4897                 if (err) {
4898                         dev_err(&pdev->dev, "no usable DMA configuration\n");
4899                         goto out_disable_device;
4900                 }
4901         }
4902
4903         pci_enable_pcie_error_reporting(pdev);
4904         enable_pcie_relaxed_ordering(pdev);
4905         pci_set_master(pdev);
4906         pci_save_state(pdev);
4907
4908         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
4909         if (!adapter) {
4910                 err = -ENOMEM;
4911                 goto out_disable_device;
4912         }
4913
4914         adapter->regs = pci_ioremap_bar(pdev, 0);
4915         if (!adapter->regs) {
4916                 dev_err(&pdev->dev, "cannot map device registers\n");
4917                 err = -ENOMEM;
4918                 goto out_free_adapter;
4919         }
4920
4921         adapter->pdev = pdev;
4922         adapter->pdev_dev = &pdev->dev;
4923         adapter->mbox = func;
4924         adapter->fn = func;
4925         adapter->msg_enable = dflt_msg_enable;
4926         memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
4927
4928         spin_lock_init(&adapter->stats_lock);
4929         spin_lock_init(&adapter->tid_release_lock);
4930
4931         INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
4932         INIT_WORK(&adapter->db_full_task, process_db_full);
4933         INIT_WORK(&adapter->db_drop_task, process_db_drop);
4934
4935         err = t4_prep_adapter(adapter);
4936         if (err)
4937                 goto out_unmap_bar;
4938         setup_memwin(adapter);
4939         err = adap_init0(adapter);
4940         setup_memwin_rdma(adapter);
4941         if (err)
4942                 goto out_unmap_bar;
4943
4944         for_each_port(adapter, i) {
4945                 struct net_device *netdev;
4946
4947                 netdev = alloc_etherdev_mq(sizeof(struct port_info),
4948                                            MAX_ETH_QSETS);
4949                 if (!netdev) {
4950                         err = -ENOMEM;
4951                         goto out_free_dev;
4952                 }
4953
4954                 SET_NETDEV_DEV(netdev, &pdev->dev);
4955
4956                 adapter->port[i] = netdev;
4957                 pi = netdev_priv(netdev);
4958                 pi->adapter = adapter;
4959                 pi->xact_addr_filt = -1;
4960                 pi->port_id = i;
4961                 netdev->irq = pdev->irq;
4962
4963                 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
4964                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4965                         NETIF_F_RXCSUM | NETIF_F_RXHASH |
4966                         NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
4967                 if (highdma)
4968                         netdev->hw_features |= NETIF_F_HIGHDMA;
4969                 netdev->features |= netdev->hw_features;
4970                 netdev->vlan_features = netdev->features & VLAN_FEAT;
4971
4972                 netdev->priv_flags |= IFF_UNICAST_FLT;
4973
4974                 netdev->netdev_ops = &cxgb4_netdev_ops;
4975                 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
4976         }
4977
4978         pci_set_drvdata(pdev, adapter);
4979
4980         if (adapter->flags & FW_OK) {
4981                 err = t4_port_init(adapter, func, func, 0);
4982                 if (err)
4983                         goto out_free_dev;
4984         }
4985
4986         /*
         * Configure queues and allocate tables now; they can be needed as
4988          * soon as the first register_netdev completes.
4989          */
4990         cfg_queues(adapter);
4991
4992         adapter->l2t = t4_init_l2t();
4993         if (!adapter->l2t) {
4994                 /* We tolerate a lack of L2T, giving up some functionality */
4995                 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
4996                 adapter->params.offload = 0;
4997         }
4998
4999         if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
5000                 dev_warn(&pdev->dev, "could not allocate TID table, "
5001                          "continuing\n");
5002                 adapter->params.offload = 0;
5003         }
5004
5005         /* See what interrupts we'll be using */
5006         if (msi > 1 && enable_msix(adapter) == 0)
5007                 adapter->flags |= USING_MSIX;
5008         else if (msi > 0 && pci_enable_msi(pdev) == 0)
5009                 adapter->flags |= USING_MSI;
5010
5011         err = init_rss(adapter);
5012         if (err)
5013                 goto out_free_dev;
5014
5015         /*
5016          * The card is now ready to go.  If any errors occur during device
5017          * registration we do not fail the whole card but rather proceed only
5018          * with the ports we manage to register successfully.  However we must
5019          * register at least one net device.
5020          */
5021         for_each_port(adapter, i) {
5022                 pi = adap2pinfo(adapter, i);
5023                 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
5024                 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
5025
5026                 err = register_netdev(adapter->port[i]);
5027                 if (err)
5028                         break;
5029                 adapter->chan_map[pi->tx_chan] = i;
5030                 print_port_info(adapter->port[i]);
5031         }
5032         if (i == 0) {
5033                 dev_err(&pdev->dev, "could not register any net devices\n");
5034                 goto out_free_dev;
5035         }
5036         if (err) {
5037                 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
5038                 err = 0;
5039         }
5040
5041         if (cxgb4_debugfs_root) {
5042                 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
5043                                                            cxgb4_debugfs_root);
5044                 setup_debugfs(adapter);
5045         }
5046
5047         /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
5048         pdev->needs_freset = 1;
5049
5050         if (is_offload(adapter))
5051                 attach_ulds(adapter);
5052
5053 sriov:
5054 #ifdef CONFIG_PCI_IOV
5055         if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
5056                 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
5057                         dev_info(&pdev->dev,
5058                                  "instantiated %u virtual functions\n",
5059                                  num_vf[func]);
5060 #endif
5061         return 0;
5062
5063  out_free_dev:
5064         free_some_resources(adapter);
5065  out_unmap_bar:
5066         iounmap(adapter->regs);
5067  out_free_adapter:
5068         kfree(adapter);
5069  out_disable_device:
5070         pci_disable_pcie_error_reporting(pdev);
5071         pci_disable_device(pdev);
5072  out_release_regions:
5073         pci_release_regions(pdev);
5074         pci_set_drvdata(pdev, NULL);
5075         return err;
5076 }
5077
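/*
 * PCI remove routine: undo everything init_one() set up, in roughly
 * reverse order, including releasing any filters still in use and
 * disabling SR-IOV virtual functions.
 */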
5078 static void remove_one(struct pci_dev *pdev)
5079 {
5080         struct adapter *adapter = pci_get_drvdata(pdev);
5081
#ifdef CONFIG_PCI_IOV
        pci_disable_sriov(pdev);
#endif
5086
5087         if (adapter) {
5088                 int i;
5089
5090                 if (is_offload(adapter))
5091                         detach_ulds(adapter);
5092
5093                 for_each_port(adapter, i)
5094                         if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5095                                 unregister_netdev(adapter->port[i]);
5096
5097                 if (adapter->debugfs_root)
5098                         debugfs_remove_recursive(adapter->debugfs_root);
5099
5100                 /* If we allocated filters, free up state associated with any
5101                  * valid filters ...
5102                  */
5103                 if (adapter->tids.ftid_tab) {
5104                         struct filter_entry *f = &adapter->tids.ftid_tab[0];
5105                         for (i = 0; i < (adapter->tids.nftids +
5106                                         adapter->tids.nsftids); i++, f++)
5107                                 if (f->valid)
5108                                         clear_filter(adapter, f);
5109                 }
5110
5111                 if (adapter->flags & FULL_INIT_DONE)
5112                         cxgb_down(adapter);
5113
5114                 free_some_resources(adapter);
5115                 iounmap(adapter->regs);
5116                 kfree(adapter);
5117                 pci_disable_pcie_error_reporting(pdev);
5118                 pci_disable_device(pdev);
5119                 pci_release_regions(pdev);
5120                 pci_set_drvdata(pdev, NULL);
5121         } else
5122                 pci_release_regions(pdev);
5123 }
5124
5125 static struct pci_driver cxgb4_driver = {
5126         .name     = KBUILD_MODNAME,
5127         .id_table = cxgb4_pci_tbl,
5128         .probe    = init_one,
5129         .remove   = remove_one,
5130         .err_handler = &cxgb4_eeh,
5131 };
5132
5133 static int __init cxgb4_init_module(void)
5134 {
5135         int ret;
5136
5137         workq = create_singlethread_workqueue("cxgb4");
5138         if (!workq)
5139                 return -ENOMEM;
5140
        /* Debugfs support is optional, so just warn if this fails */
5142         cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
5143         if (!cxgb4_debugfs_root)
5144                 pr_warn("could not create debugfs entry, continuing\n");
5145
5146         ret = pci_register_driver(&cxgb4_driver);
5147         if (ret < 0)
5148                 debugfs_remove(cxgb4_debugfs_root);
5149         return ret;
5150 }
5151
5152 static void __exit cxgb4_cleanup_module(void)
5153 {
5154         pci_unregister_driver(&cxgb4_driver);
5155         debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
5156         flush_workqueue(workq);
5157         destroy_workqueue(workq);
5158 }
5159
5160 module_init(cxgb4_init_module);
5161 module_exit(cxgb4_cleanup_module);