/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>

#include "cxgb4_dcb.h"
#include <../drivers/net/bonding/bonding.h>
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * ...
 */
#define MAX_SGE_TIMERVAL 200U
/*
 * Physical Function provisioning constants.
 */
enum {
        PFRES_NVI = 4,                  /* # of Virtual Interfaces */
        PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
        PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr */
        PFRES_NEQ = 256,                /* # of egress queues */
        PFRES_NIQ = 0,                  /* # of ingress queues */
        PFRES_TC = 0,                   /* PCI-E traffic class */
        PFRES_NEXACTF = 128,            /* # of exact MPS filters */

        PFRES_R_CAPS = FW_CMD_CAP_PF,
        PFRES_WX_CAPS = FW_CMD_CAP_PF,
};
#ifdef CONFIG_PCI_IOV
/*
 * Virtual Function provisioning constants.  We need two extra Ingress
 * Queues with Interrupt capability to serve as the VF's Firmware
 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
 * neither will have Free Lists associated with them.  For each
 * Ethernet/Control Egress Queue and for each Free List, we need an
 * Egress Context.
 */
enum {
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */

        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
        VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */

        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
};
/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
                                  unsigned int pf, unsigned int vf)
{
        unsigned int portn, portvec;

        /*
         * Give PF's access to all of the ports.
         */
        if (vf == 0)
                return FW_PFVF_CMD_PMASK_MASK;

        /*
         * For VFs, we'll assign them access to the ports based purely on the
         * PF.  We assign active ports in order, wrapping around if there are
         * fewer active ports than PFs: e.g. active port[pf % nports].
         * Unfortunately the adapter's port_info structs haven't been
         * initialized yet so we have to compute this.
         */
        if (adapter->params.nports == 0)
                return 0;

        portn = pf % adapter->params.nports;
        portvec = adapter->params.portvec;
        /*
         * Isolate the lowest set bit in the port vector.  If we're at
         * the port number that we want, return that as the pmask.
         * Otherwise mask that bit out of the port vector and
         * decrement our port number ...
         */
        unsigned int pmask = portvec ^ (portvec & (portvec-1));
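
/* For illustration (not part of the driver logic above): with
 * portvec = 0xa (ports 1 and 3 active),
 *
 *      portvec & (portvec - 1) = 0xa & 0x9 = 0x8
 *      portvec ^ 0x8           = 0x2            -> pmask selects port 1
 *
 * i.e. XOR-ing with the "clear lowest set bit" result isolates exactly the
 * lowest set bit; repeating this while decrementing the port number walks
 * the active ports in order.
 */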
enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES     = 16384,
        MAX_RX_BUFFERS       = 16384,
        MIN_TXQ_ENTRIES      = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES     = 128,
};
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
        /* Administrative fields for filter.
         */
        u32 valid:1;            /* filter allocated and valid */
        u32 locked:1;           /* filter is administratively locked */

        u32 pending:1;          /* filter action is pending firmware reply */
        u32 smtidx:8;           /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        /* The filter itself.  Most of this is a straight copy of information
         * provided by the extended ioctl().  Some fields are translated to
         * internal forms -- for instance the Ingress Queue ID passed in from
         * the ioctl() is translated into the Absolute Ingress Queue ID.
         */
        struct ch_filter_specification fs;
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
static const struct pci_device_id cxgb4_pci_tbl[] = {
        CH_DEVICE(0xa000, 0),  /* PE10K */
        CH_DEVICE(0x4001, -1),
        CH_DEVICE(0x4002, -1),
        CH_DEVICE(0x4003, -1),
        CH_DEVICE(0x4004, -1),
        CH_DEVICE(0x4005, -1),
        CH_DEVICE(0x4006, -1),
        CH_DEVICE(0x4007, -1),
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
        CH_DEVICE(0x400d, -1),
        CH_DEVICE(0x400e, -1),
        CH_DEVICE(0x4080, -1),
        CH_DEVICE(0x4081, -1),
        CH_DEVICE(0x4082, -1),
        CH_DEVICE(0x4083, -1),
        CH_DEVICE(0x4084, -1),
        CH_DEVICE(0x4085, -1),
        CH_DEVICE(0x4086, -1),
        CH_DEVICE(0x4087, -1),
        CH_DEVICE(0x4088, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
        CH_DEVICE(0x4404, 4),
        CH_DEVICE(0x4405, 4),
        CH_DEVICE(0x4406, 4),
        CH_DEVICE(0x4407, 4),
        CH_DEVICE(0x4408, 4),
        CH_DEVICE(0x4409, 4),
        CH_DEVICE(0x440a, 4),
        CH_DEVICE(0x440d, 4),
        CH_DEVICE(0x440e, 4),
        CH_DEVICE(0x4480, 4),
        CH_DEVICE(0x4481, 4),
        CH_DEVICE(0x4482, 4),
        CH_DEVICE(0x4483, 4),
        CH_DEVICE(0x4484, 4),
        CH_DEVICE(0x4485, 4),
        CH_DEVICE(0x4486, 4),
        CH_DEVICE(0x4487, 4),
        CH_DEVICE(0x4488, 4),
        CH_DEVICE(0x5001, 4),
        CH_DEVICE(0x5002, 4),
        CH_DEVICE(0x5003, 4),
        CH_DEVICE(0x5004, 4),
        CH_DEVICE(0x5005, 4),
        CH_DEVICE(0x5006, 4),
        CH_DEVICE(0x5007, 4),
        CH_DEVICE(0x5008, 4),
        CH_DEVICE(0x5009, 4),
        CH_DEVICE(0x500A, 4),
        CH_DEVICE(0x500B, 4),
        CH_DEVICE(0x500C, 4),
        CH_DEVICE(0x500D, 4),
        CH_DEVICE(0x500E, 4),
        CH_DEVICE(0x500F, 4),
        CH_DEVICE(0x5010, 4),
        CH_DEVICE(0x5011, 4),
        CH_DEVICE(0x5012, 4),
        CH_DEVICE(0x5013, 4),
        CH_DEVICE(0x5014, 4),
        CH_DEVICE(0x5015, 4),
        CH_DEVICE(0x5080, 4),
        CH_DEVICE(0x5081, 4),
        CH_DEVICE(0x5082, 4),
        CH_DEVICE(0x5083, 4),
        CH_DEVICE(0x5084, 4),
        CH_DEVICE(0x5085, 4),
        CH_DEVICE(0x5401, 4),
        CH_DEVICE(0x5402, 4),
        CH_DEVICE(0x5403, 4),
        CH_DEVICE(0x5404, 4),
        CH_DEVICE(0x5405, 4),
        CH_DEVICE(0x5406, 4),
        CH_DEVICE(0x5407, 4),
        CH_DEVICE(0x5408, 4),
        CH_DEVICE(0x5409, 4),
        CH_DEVICE(0x540A, 4),
        CH_DEVICE(0x540B, 4),
        CH_DEVICE(0x540C, 4),
        CH_DEVICE(0x540D, 4),
        CH_DEVICE(0x540E, 4),
        CH_DEVICE(0x540F, 4),
        CH_DEVICE(0x5410, 4),
        CH_DEVICE(0x5411, 4),
        CH_DEVICE(0x5412, 4),
        CH_DEVICE(0x5413, 4),
        CH_DEVICE(0x5414, 4),
        CH_DEVICE(0x5415, 4),
        CH_DEVICE(0x5480, 4),
        CH_DEVICE(0x5481, 4),
        CH_DEVICE(0x5482, 4),
        CH_DEVICE(0x5483, 4),
        CH_DEVICE(0x5484, 4),
        CH_DEVICE(0x5485, 4),
        { 0, }
};
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * ...
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
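
/* For illustration: an Ethernet header is 14 bytes, so starting the frame at
 * buffer offset 2 places the IP header at offset 16, a 4-byte boundary:
 *
 *      offset:  0    2                  16
 *               +----+------------------+------------ ...
 *               |pad | Ethernet (14B)   | IP header
 */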
#ifdef CONFIG_PCI_IOV
static bool vf_acls;

module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
                 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
        TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
        TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
        TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
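
/* Bit budget of the default tuple above, using the widths quoted in the
 * comment: 1 (IP Fragment) + 3 (MPS Match Type) + 8 (IP Protocol) +
 * 17 ([Inner] VLAN) + 3 (Port) + 1 (FCoE) = 33 bits, comfortably inside
 * the 36-bit compressed filter budget.
 */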
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                /* ... */
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}
#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
        int i;

        /* We use a simple mapping of Port TX Queue Index to DCB
         * Priority when we're enabling DCB.
         */
        for (i = 0; i < pi->nqsets; i++, txq++) {
                u32 name, value;
                int err;

                name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
                        FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
                value = enable ? i : 0xffffffff;

                /* Since we can be called while atomic (from "interrupt
                 * level") we need to issue the Set Parameters Command
                 * without sleeping (timeout < 0).
                 */
                err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
                                            &name, &value /* ... */);
                if (err)
                        dev_err(adap->pdev_dev,
                                "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
                                enable ? "set" : "unset", pi->port_id, i, -err);
                else
                        txq->dcb_prio = value;
        }
}
#endif /* CONFIG_CHELSIO_T4_DCB */
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else {
#ifdef CONFIG_CHELSIO_T4_DCB
                        cxgb4_dcb_state_init(dev);
                        dcb_tx_queue_prio_enable(dev, false);
#endif /* CONFIG_CHELSIO_T4_DCB */
                        netif_carrier_off(dev);
                }
        }
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        /* ... */
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                                naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                                naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}
static struct workqueue_struct *workq;

/**
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0) {
                /* ... */
                ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
                                          true, CXGB4_DCB_ENABLED);
        }
        return ret;
}
int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
        struct port_info *pi = netdev_priv(dev);

        return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
#else
        return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
        int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
        struct net_device *dev = adap->port[port];
        int old_dcb_enabled = cxgb4_dcb_enabled(dev);
        int new_dcb_enabled;

        cxgb4_dcb_handle_fw_update(adap, pcmd);
        new_dcb_enabled = cxgb4_dcb_enabled(dev);

        /* If the DCB has become enabled or disabled on the port then we're
         * going to need to set up/tear down DCB Priority parameters for the
         * TX Queues associated with the port.
         */
        if (new_dcb_enabled != old_dcb_enabled)
                dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
        /* If the new or old filter have loopback rewriting rules then we'll
         * need to free any existing Layer Two Table (L2T) entries of the old
         * filter rule.  The firmware will handle freeing up any Source MAC
         * Table (SMT) entries used for rewriting Source MAC Addresses in
         * loopback rules.
         */
        if (f->l2t)
                cxgb4_l2t_release(f->l2t);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags, l2t pointer, etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}
/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        unsigned int idx = GET_TID(rpl);
        unsigned int nidx = idx - adap->tids.ftid_base;
        unsigned int ret;
        struct filter_entry *f;

        if (idx >= adap->tids.ftid_base && nidx <
            (adap->tids.nftids + adap->tids.nsftids)) {
                idx = nidx;
                ret = GET_TCB_COOKIE(rpl->cookie);
                f = &adap->tids.ftid_tab[idx];

                if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /* Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
                        dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
                                idx);
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        /* Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
                                idx, ret);
                        clear_filter(adap, f);
                }
        }
}
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
                     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
                const struct fw_port_cmd *pcmd = (const void *)p->data;
                unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
                unsigned int action =
                        FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_GET_PORT_INFO) {
                        int port = FW_PORT_CMD_PORTID_GET(
                                        be32_to_cpu(pcmd->op_to_portid));
                        struct net_device *dev = q->adap->port[port];
                        int state_input = ((pcmd->u.info.dcbxdis_pkd &
                                            FW_PORT_CMD_DCBXDIS)
                                           ? CXGB4_DCB_INPUT_FW_DISABLED
                                           : CXGB4_DCB_INPUT_FW_ENABLED);

                        cxgb4_dcb_state_fsm(dev, state_input);
                }

                if (cmd == FW_PORT_CMD &&
                    action == FW_PORT_ACTION_L2_DCB_CFG)
                        dcb_rpl(q->adap, pcmd);
                else
#endif
                        t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
out:
        return 0;
}
/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
         */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}
static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));

        if (v) {
                /* ... */
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        for_each_ofldrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);

        for_each_rdmaciq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
                         adap->port[0]->name, i);
}
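
/* For a hypothetical two-port adapter named eth0/eth1 with two Ethernet
 * queue sets per port, the format strings above yield vector names like
 *
 *      eth0, eth0-FWeventq, eth0-Rx0, eth0-Rx1, eth1-Rx0, eth1-Rx1,
 *      eth0-ofld0, eth0-rdma0, eth0-rdma-ciq0, ...
 *
 * as seen in /proc/interrupts.
 */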
static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
        int msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmaciq(s, rdmaciqqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmaciq[rdmaciqqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--rdmaciqqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmaciq[rdmaciqqidx].rspq);
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
        for_each_rdmaciq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
 *      write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;

        err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
}
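
/* Illustration (assumed queue counts): with nqsets = 4 and a 128-slot VI RSS
 * table, @queues would typically cycle 0,1,2,3,0,1,... and every slot is
 * rewritten to the absolute SGE queue ID of the chosen Rx queue, so the hash
 * spreads flows across the port's four queues.
 */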
/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                err = write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}
/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                                   netdev_get_tx_queue(dev, j),
                                                   s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_rdmaciq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmaciq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, q->fl.size ? &q->fl : NULL,
                                       uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_ciq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through alloc_mem().
 */
static void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the new filter.
         */
        if (f->fs.newdmac || f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = t4_l2t_alloc_switching(adapter->l2t);
                if (f->l2t == NULL)
                        return -EAGAIN;
                if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
                                         f->fs.eport, f->fs.dmac)) {
                        cxgb4_l2t_release(f->l2t);
                        f->l2t = NULL;
                        return -ENOMEM;
                }
        }

        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
        memset(fwr, 0, sizeof(*fwr));

        /* It would be nice to put most of the following in t4_hw.c but most
         * of the work is translating the cxgbtool ch_filter_specification
         * into the Work Request and the definition of that structure is
         * currently in cxgbtool.h which isn't appropriate to pull into the
         * common code.  We may eventually try to come up with a more neutral
         * filter specification structure but for now it's easiest to simply
         * put this fairly direct code in line ...
         */
        fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
        fwr->tid_to_iq =
                htonl(V_FW_FILTER_WR_TID(ftid) |
                      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                      V_FW_FILTER_WR_NOREPLY(0) |
                      V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                                             f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                                            f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                      V_FW_FILTER_WR_PRIO(f->fs.prio) |
                      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htons(f->fs.val.ethtype);
        fwr->ethtypem = htons(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
                 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
        fwr->rx_chan_rx_rpl_iq =
                htons(V_FW_FILTER_WR_RX_CHAN(0) |
                      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htons(f->fs.val.ivlan);
        fwr->ivlanm = htons(f->fs.mask.ivlan);
        fwr->ovlan = htons(f->fs.val.ovlan);
        fwr->ovlanm = htons(f->fs.mask.ovlan);
        memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = htons(f->fs.val.lport);
        fwr->lpm = htons(f->fs.mask.lport);
        fwr->fp = htons(f->fs.val.fport);
        fwr->fpm = htons(f->fs.mask.fport);
        if (f->fs.newsmac)
                memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
        t4_ofld_send(adapter, skb);
        return 0;
}
/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int len, ftid;

        len = sizeof(*fwr);
        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, len);
        t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(adapter, skb);
        return 0;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
                             void *accel_priv, select_queue_fallback_t fallback)
{
        int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
        /* If a Data Center Bridging has been successfully negotiated on this
         * link then we'll use the skb's priority to map it to a TX Queue.
         * The skb's priority is determined via the VLAN Tag Priority Code
         * Point field.
         */
        if (cxgb4_dcb_enabled(dev)) {
                u16 vlan_tci;
                int err;

                err = vlan_get_tag(skb, &vlan_tci);
                if (unlikely(err)) {
                        if (net_ratelimit())
                                netdev_warn(dev,
                                            "TX Packet without VLAN Tag on DCB Link\n");
                        txq = 0;
                } else {
                        txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
                }
                return txq;
        }
#endif /* CONFIG_CHELSIO_T4_DCB */

        if (select_queue) {
                txq = (skb_rx_queue_recorded(skb)
                        ? skb_get_rx_queue(skb)
                        : smp_processor_id());

                while (unlikely(txq >= dev->real_num_tx_queues))
                        txq -= dev->real_num_tx_queues;

                return txq;
        }

        return fallback(dev, skb) % dev->real_num_tx_queues;
}
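
/* Worked example for the DCB path above: a VLAN TCI of 0xa005 has priority
 * bits 0xa000, and (0xa005 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT =
 * 0xa000 >> 13 = 5, so the skb is steered to TX queue 5.
 */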
static inline int is_offload(const struct adapter *adap)
{
        return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
        return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        netdev2adap(dev)->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxBroadcastFrames  ",
        "TxMulticastFrames  ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",
        "RxBroadcastFrames  ",
        "RxMulticastFrames  ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",
        "RxBG0FramesDropped ",
        "RxBG1FramesDropped ",
        "RxBG2FramesDropped ",
        "RxBG3FramesDropped ",
        "RxBG0FramesTrunc   ",
        "RxBG1FramesTrunc   ",
        "RxBG2FramesTrunc   ",
        "RxBG3FramesTrunc   ",
        "WriteCoalSuccess   ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
        struct adapter *adap = netdev2adap(dev);

        if (is_t4(adap->params.chip))
                return T4_REGMAP_SIZE;
        else
                return T5_REGMAP_SIZE;
}
static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = netdev2adap(dev);

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));

        if (adapter->params.fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                         "%u.%u.%u.%u, TP %u.%u.%u.%u",
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
                         FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
                         FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
        /* ... */
};

static void collect_sge_port_stats(const struct adapter *adap,
                                   const struct port_info *p,
                                   struct queue_port_stats *s)
{
        int i;
        const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
        const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

        memset(s, 0, sizeof(*s));
        for (i = 0; i < p->nqsets; i++, rx++, tx++) {
                /* ... */
                s->tx_csum += tx->tx_cso;
                s->rx_csum += rx->stats.rx_cso;
                s->vlan_ex += rx->stats.vlan_ex;
                s->vlan_ins += tx->vlan_ins;
                s->gro_pkts += rx->stats.lro_pkts;
                s->gro_merged += rx->stats.lro_merged;
        }
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 val1, val2;

        t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

        data += sizeof(struct port_stats) / sizeof(u64);
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
        data += sizeof(struct queue_port_stats) / sizeof(u64);
        if (!is_t4(adapter->params.chip)) {
                t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
                val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
                val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
                *data = val1 - val2;
                /* ... */
        } else {
                memset(data, 0, 2 * sizeof(u64));
                /* ... */
        }
}
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
        return CHELSIO_CHIP_VERSION(ap->params.chip) |
                (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
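
/* Example: chip version V and release R produce V | (R << 10) | (1 << 16);
 * e.g. V = 5, R = 1 gives 0x10405 (register dump version 1, revision 1,
 * chip version 5).
 */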
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
                           unsigned int end)
{
        u32 *p = buf + start;

        for ( ; start <= end; start += sizeof(u32))
                *p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
                     void *buf)
{
        static const unsigned int t4_reg_ranges[] = {
                /* ... */
        };

        static const unsigned int t5_reg_ranges[] = {
                /* ... */
        };

        int i;
        struct adapter *ap = netdev2adap(dev);
        static const unsigned int *reg_ranges;
        int arr_size = 0, buf_size = 0;

        if (is_t4(ap->params.chip)) {
                reg_ranges = &t4_reg_ranges[0];
                arr_size = ARRAY_SIZE(t4_reg_ranges);
                buf_size = T4_REGMAP_SIZE;
        } else {
                reg_ranges = &t5_reg_ranges[0];
                arr_size = ARRAY_SIZE(t5_reg_ranges);
                buf_size = T5_REGMAP_SIZE;
        }

        regs->version = mk_adap_vers(ap);

        memset(buf, 0, buf_size);
        for (i = 0; i < arr_size; i += 2)
                reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
static int restart_autoneg(struct net_device *dev)
{
        struct port_info *p = netdev_priv(dev);

        if (!netif_running(dev))
                return -EAGAIN;
        if (p->link_cfg.autoneg != AUTONEG_ENABLE)
                return -EINVAL;
        t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
        return 0;
}

static int identify_port(struct net_device *dev,
                         enum ethtool_phys_id_state state)
{
        unsigned int val;
        struct adapter *adap = netdev2adap(dev);

        if (state == ETHTOOL_ID_ACTIVE)
                val = 0xffff;
        else if (state == ETHTOOL_ID_INACTIVE)
                val = 0;
        else
                return -EINVAL;

        return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}
static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
        unsigned int v = 0;

        if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
            type == FW_PORT_TYPE_BT_XAUI) {
                v |= SUPPORTED_TP;
                if (caps & FW_PORT_CAP_SPEED_100M)
                        v |= SUPPORTED_100baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseT_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseT_Full;
        } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
                v |= SUPPORTED_Backplane;
                if (caps & FW_PORT_CAP_SPEED_1G)
                        v |= SUPPORTED_1000baseKX_Full;
                if (caps & FW_PORT_CAP_SPEED_10G)
                        v |= SUPPORTED_10000baseKX4_Full;
        } else if (type == FW_PORT_TYPE_KR)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
        else if (type == FW_PORT_TYPE_BP_AP)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
        else if (type == FW_PORT_TYPE_BP4_AP)
                v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
                     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
                     SUPPORTED_10000baseKX4_Full;
        else if (type == FW_PORT_TYPE_FIBER_XFI ||
                 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
                v |= SUPPORTED_FIBRE;
        else if (type == FW_PORT_TYPE_BP40_BA)
                v |= SUPPORTED_40000baseSR4_Full;

        if (caps & FW_PORT_CAP_ANEG)
                v |= SUPPORTED_Autoneg;
        return v;
}

static unsigned int to_fw_linkcaps(unsigned int caps)
{
        unsigned int v = 0;

        if (caps & ADVERTISED_100baseT_Full)
                v |= FW_PORT_CAP_SPEED_100M;
        if (caps & ADVERTISED_1000baseT_Full)
                v |= FW_PORT_CAP_SPEED_1G;
        if (caps & ADVERTISED_10000baseT_Full)
                v |= FW_PORT_CAP_SPEED_10G;
        if (caps & ADVERTISED_40000baseSR4_Full)
                v |= FW_PORT_CAP_SPEED_40G;
        return v;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        const struct port_info *p = netdev_priv(dev);

        if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
            p->port_type == FW_PORT_TYPE_BT_XFI ||
            p->port_type == FW_PORT_TYPE_BT_XAUI)
                cmd->port = PORT_TP;
        else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
                 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
                cmd->port = PORT_FIBRE;
        else if (p->port_type == FW_PORT_TYPE_SFP ||
                 p->port_type == FW_PORT_TYPE_QSFP_10G ||
                 p->port_type == FW_PORT_TYPE_QSFP) {
                if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
                    p->mod_type == FW_PORT_MOD_TYPE_SR ||
                    p->mod_type == FW_PORT_MOD_TYPE_ER ||
                    p->mod_type == FW_PORT_MOD_TYPE_LRM)
                        cmd->port = PORT_FIBRE;
                else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
                         p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
                        cmd->port = PORT_DA;
                else
                        cmd->port = PORT_OTHER;
        } else
                cmd->port = PORT_OTHER;

        if (p->mdio_addr >= 0) {
                cmd->phy_address = p->mdio_addr;
                cmd->transceiver = XCVR_EXTERNAL;
                cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
                        MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
        } else {
                cmd->phy_address = 0;  /* not really, but no better option */
                cmd->transceiver = XCVR_INTERNAL;
                cmd->mdio_support = 0;
        }

        cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
        cmd->advertising = from_fw_linkcaps(p->port_type,
                                            p->link_cfg.advertising);
        ethtool_cmd_speed_set(cmd,
                              netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
        cmd->duplex = DUPLEX_FULL;
        cmd->autoneg = p->link_cfg.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}
static unsigned int speed_to_caps(int speed)
{
        if (speed == 100)
                return FW_PORT_CAP_SPEED_100M;
        if (speed == 1000)
                return FW_PORT_CAP_SPEED_1G;
        if (speed == 10000)
                return FW_PORT_CAP_SPEED_10G;
        if (speed == 40000)
                return FW_PORT_CAP_SPEED_40G;
        return 0;
}
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        unsigned int cap;
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;
        u32 speed = ethtool_cmd_speed(cmd);

        if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
                return -EINVAL;

        if (!(lc->supported & FW_PORT_CAP_ANEG)) {
                /*
                 * PHY offers a single speed.  See if that's what's
                 * being requested.
                 */
                if (cmd->autoneg == AUTONEG_DISABLE &&
                    (lc->supported & speed_to_caps(speed)))
                        return 0;
                return -EINVAL;
        }

        if (cmd->autoneg == AUTONEG_DISABLE) {
                cap = speed_to_caps(speed);

                if (!(lc->supported & cap) ||
                    /* ... */)
                        return -EINVAL;
                lc->requested_speed = cap;
                lc->advertising = 0;
        } else {
                cap = to_fw_linkcaps(cmd->advertising);
                if (!(lc->supported & cap))
                        return -EINVAL;
                lc->requested_speed = 0;
                lc->advertising = cap | FW_PORT_CAP_ANEG;
        }
        lc->autoneg = cmd->autoneg;

        if (netif_running(dev))
                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
                                     lc);
        return 0;
}
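
/* Example (illustrative device name): "ethtool -s eth0 speed 10000 autoneg
 * off" arrives here with cmd->autoneg == AUTONEG_DISABLE and speed 10000,
 * which speed_to_caps() maps to FW_PORT_CAP_SPEED_10G.
 */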
static void get_pauseparam(struct net_device *dev,
                           struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);

        epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
        epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
        epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
                          struct ethtool_pauseparam *epause)
{
        struct port_info *p = netdev_priv(dev);
        struct link_config *lc = &p->link_cfg;

        if (epause->autoneg == AUTONEG_DISABLE)
                lc->requested_fc = 0;
        else if (lc->supported & FW_PORT_CAP_ANEG)
                lc->requested_fc = PAUSE_AUTONEG;
        else
                return -EINVAL;

        if (epause->rx_pause)
                lc->requested_fc |= PAUSE_RX;
        if (epause->tx_pause)
                lc->requested_fc |= PAUSE_TX;
        if (netif_running(dev))
                return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
                                     lc);
        return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct sge *s = &pi->adapter->sge;

        e->rx_max_pending = MAX_RX_BUFFERS;
        e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
        e->rx_jumbo_max_pending = 0;
        e->tx_max_pending = MAX_TXQ_ENTRIES;

        e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
        e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
        e->rx_jumbo_pending = 0;
        e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
        int i;
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;

        if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
            e->tx_pending > MAX_TXQ_ENTRIES ||
            e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & FULL_INIT_DONE)
                return -EBUSY;

        for (i = 0; i < pi->nqsets; ++i) {
                s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
                s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
                s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
        }
        return 0;
}
static int closest_timer(const struct sge *s, int time)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                delta = time - s->timer_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}

static int closest_thres(const struct sge *s, int thres)
{
        int i, delta, match = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        match = i;
                }
        }
        return match;
}
/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
                               const struct sge_rspq *q)
{
        unsigned int idx = q->intr_params >> 1;

        return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}

/**
 *      set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *      @q: the Rx queue
 *      @us: the hold-off time in us, or 0 to disable timer
 *      @cnt: the hold-off packet count, or 0 to disable counter
 *
 *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *      one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rspq_intr_params(struct sge_rspq *q,
                                unsigned int us, unsigned int cnt)
{
        struct adapter *adap = q->adap;

        if ((us | cnt) == 0)
                cnt = 1;

        if (cnt) {
                int err;
                u32 v, new_idx;

                new_idx = closest_thres(&adap->sge, cnt);
                if (q->desc && q->pktcnt_idx != new_idx) {
                        /* the queue has already been created, update it */
                        v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
                            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            FW_PARAMS_PARAM_YZ(q->cntxt_id);
                        err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
                                            &new_idx);
                        if (err)
                                return err;
                }
                q->pktcnt_idx = new_idx;
        }

        us = us == 0 ? 6 : closest_timer(&adap->sge, us);
        q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
        return 0;
}
/**
 *      set_rx_intr_params - set a net device's Rx interrupt hold-off parameters
 *      @dev: the network device
 *      @us: the hold-off time in us, or 0 to disable timer
 *      @cnt: the hold-off packet count, or 0 to disable counter
 *
 *      Set the RX interrupt hold-off parameters for a network device.
 */
static int set_rx_intr_params(struct net_device *dev,
                              unsigned int us, unsigned int cnt)
{
        int i, err;
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adap = pi->adapter;
        struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

        for (i = 0; i < pi->nqsets; i++, q++) {
                err = set_rspq_intr_params(&q->rspq, us, cnt);
                if (err)
                        return err;
        }
        return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        return set_rx_intr_params(dev, c->rx_coalesce_usecs,
                                  c->rx_max_coalesced_frames);
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct adapter *adap = pi->adapter;
        const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

        c->rx_coalesce_usecs = qtimer_val(adap, rq);
        c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
                adap->sge.counter_val[rq->pktcnt_idx] : 0;
        return 0;
}
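
/* These two callbacks implement the standard ethtool coalescing controls,
 * e.g. (illustrative device name and values):
 *
 *      ethtool -C eth0 rx-usecs 10 rx-frames 8
 *
 * which reaches set_coalesce() with rx_coalesce_usecs = 10 and
 * rx_max_coalesced_frames = 8.
 */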
2767 * eeprom_ptov - translate a physical EEPROM address to virtual
2768 * @phys_addr: the physical EEPROM address
2769 * @fn: the PCI function number
2770 * @sz: size of function-specific area
2772 * Translate a physical EEPROM address to virtual. The first 1K is
2773 * accessed through virtual addresses starting at 31K, the rest is
2774 * accessed through virtual addresses starting at 0.
2776 * The mapping is as follows:
2777 * [0..1K) -> [31K..32K)
2778 * [1K..1K+A) -> [31K-A..31K)
2779 * [1K+A..ES) -> [0..ES-A-1K)
2781 * where A = @fn * @sz, and ES = EEPROM size.
2783 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2786 if (phys_addr < 1024)
2787 return phys_addr + (31 << 10);
2788 if (phys_addr < 1024 + fn)
2789 return 31744 - fn + phys_addr - 1024;
2790 if (phys_addr < EEPROMSIZE)
2791 return phys_addr - 1024 - fn;
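/* Worked example (illustrative): with @fn = 1 and @sz = EEPROMPFSIZE,
 * i.e. A = EEPROMPFSIZE, physical address 0x200 lies in the first 1K
 * and maps to 0x200 + 31K = 0x7E00; physical 1024 maps to 31K - A;
 * and physical 1024 + A maps to virtual 0.
 */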
2796 * The next two routines implement eeprom read/write from physical addresses.
2798 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2800 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2803 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2804 return vaddr < 0 ? vaddr : 0;
2807 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2809 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2812 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2813 return vaddr < 0 ? vaddr : 0;
2816 #define EEPROM_MAGIC 0x38E2F10C
2818 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2822 struct adapter *adapter = netdev2adap(dev);
2824 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2828 e->magic = EEPROM_MAGIC;
2829 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2830 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2833 memcpy(data, buf + e->offset, e->len);
2838 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2843 u32 aligned_offset, aligned_len, *p;
2844 struct adapter *adapter = netdev2adap(dev);
2846 if (eeprom->magic != EEPROM_MAGIC)
2849 aligned_offset = eeprom->offset & ~3;
2850 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
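/* Worked example: offset = 6, len = 7 touches bytes [6..13), so
 * aligned_offset = 4 and aligned_len = (7 + 2 + 3) & ~3 = 12, i.e.
 * the three 32-bit words covering bytes [4..16).
 */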
2852 if (adapter->fn > 0) {
2853 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2855 if (aligned_offset < start ||
2856 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2860 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2862 * RMW possibly needed for first or last words.
2864 buf = kmalloc(aligned_len, GFP_KERNEL);
2867 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2868 if (!err && aligned_len > 4)
2869 err = eeprom_rd_phys(adapter,
2870 aligned_offset + aligned_len - 4,
2871 (u32 *)&buf[aligned_len - 4]);
2874 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2878 err = t4_seeprom_wp(adapter, false);
2882 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2883 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2884 aligned_offset += 4;
2888 err = t4_seeprom_wp(adapter, true);
2895 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2898 const struct firmware *fw;
2899 struct adapter *adap = netdev2adap(netdev);
2901 ef->data[sizeof(ef->data) - 1] = '\0';
2902 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2906 ret = t4_load_fw(adap, fw->data, fw->size);
2907 release_firmware(fw);
2909 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2913 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2914 #define BCAST_CRC 0xa0ccc1a6
2916 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2918 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2919 wol->wolopts = netdev2adap(dev)->wol;
2920 memset(&wol->sopass, 0, sizeof(wol->sopass));
2923 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2926 struct port_info *pi = netdev_priv(dev);
2928 if (wol->wolopts & ~WOL_SUPPORTED)
2930 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2931 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2932 if (wol->wolopts & WAKE_BCAST) {
2933 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2936 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2937 ~6ULL, ~0ULL, BCAST_CRC, true);
2939 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2943 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2945 const struct port_info *pi = netdev_priv(dev);
2946 netdev_features_t changed = dev->features ^ features;
2949 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2952 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2954 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2956 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2960 static u32 get_rss_table_size(struct net_device *dev)
2962 const struct port_info *pi = netdev_priv(dev);
2964 return pi->rss_size;
2967 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
2969 const struct port_info *pi = netdev_priv(dev);
2970 unsigned int n = pi->rss_size;
2977 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
2980 struct port_info *pi = netdev_priv(dev);
2982 for (i = 0; i < pi->rss_size; i++)
2984 if (pi->adapter->flags & FULL_INIT_DONE)
2985 return write_rss(pi, pi->rss);
2989 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2992 const struct port_info *pi = netdev_priv(dev);
2994 switch (info->cmd) {
2995 case ETHTOOL_GRXFH: {
2996 unsigned int v = pi->rss_mode;
2999 switch (info->flow_type) {
3001 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3002 info->data = RXH_IP_SRC | RXH_IP_DST |
3003 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3004 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3005 info->data = RXH_IP_SRC | RXH_IP_DST;
3008 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
3009 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3010 info->data = RXH_IP_SRC | RXH_IP_DST |
3011 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3012 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3013 info->data = RXH_IP_SRC | RXH_IP_DST;
3016 case AH_ESP_V4_FLOW:
3018 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3019 info->data = RXH_IP_SRC | RXH_IP_DST;
3022 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3023 info->data = RXH_IP_SRC | RXH_IP_DST |
3024 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3025 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3026 info->data = RXH_IP_SRC | RXH_IP_DST;
3029 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
3030 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3031 info->data = RXH_IP_SRC | RXH_IP_DST |
3032 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3033 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3034 info->data = RXH_IP_SRC | RXH_IP_DST;
3037 case AH_ESP_V6_FLOW:
3039 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3040 info->data = RXH_IP_SRC | RXH_IP_DST;
3045 case ETHTOOL_GRXRINGS:
3046 info->data = pi->nqsets;
3052 static const struct ethtool_ops cxgb_ethtool_ops = {
3053 .get_settings = get_settings,
3054 .set_settings = set_settings,
3055 .get_drvinfo = get_drvinfo,
3056 .get_msglevel = get_msglevel,
3057 .set_msglevel = set_msglevel,
3058 .get_ringparam = get_sge_param,
3059 .set_ringparam = set_sge_param,
3060 .get_coalesce = get_coalesce,
3061 .set_coalesce = set_coalesce,
3062 .get_eeprom_len = get_eeprom_len,
3063 .get_eeprom = get_eeprom,
3064 .set_eeprom = set_eeprom,
3065 .get_pauseparam = get_pauseparam,
3066 .set_pauseparam = set_pauseparam,
3067 .get_link = ethtool_op_get_link,
3068 .get_strings = get_strings,
3069 .set_phys_id = identify_port,
3070 .nway_reset = restart_autoneg,
3071 .get_sset_count = get_sset_count,
3072 .get_ethtool_stats = get_stats,
3073 .get_regs_len = get_regs_len,
3074 .get_regs = get_regs,
3077 .get_rxnfc = get_rxnfc,
3078 .get_rxfh_indir_size = get_rss_table_size,
3079 .get_rxfh = get_rss_table,
3080 .set_rxfh = set_rss_table,
3081 .flash_device = set_flash,
3087 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
3091 loff_t avail = file_inode(file)->i_size;
3092 unsigned int mem = (uintptr_t)file->private_data & 3;
3093 struct adapter *adap = file->private_data - mem;
3101 if (count > avail - pos)
3102 count = avail - pos;
3104 data = t4_alloc_mem(count);
3108 spin_lock(&adap->win0_lock);
3109 ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
3110 spin_unlock(&adap->win0_lock);
3115 ret = copy_to_user(buf, data, count);
3121 *ppos = pos + count;
3125 static const struct file_operations mem_debugfs_fops = {
3126 .owner = THIS_MODULE,
3127 .open = simple_open,
3129 .llseek = default_llseek,
3132 static void add_debugfs_mem(struct adapter *adap, const char *name,
3133 unsigned int idx, unsigned int size_mb)
3137 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
3138 (void *)adap + idx, &mem_debugfs_fops);
3139 if (de && de->d_inode)
3140 de->d_inode->i_size = size_mb << 20;
3143 static int setup_debugfs(struct adapter *adap)
3148 if (IS_ERR_OR_NULL(adap->debugfs_root))
3151 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
3152 if (i & EDRAM0_ENABLE) {
3153 size = t4_read_reg(adap, MA_EDRAM0_BAR);
3154 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
3156 if (i & EDRAM1_ENABLE) {
3157 size = t4_read_reg(adap, MA_EDRAM1_BAR);
3158 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
3160 if (is_t4(adap->params.chip)) {
3161 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3162 if (i & EXT_MEM_ENABLE)
3163 add_debugfs_mem(adap, "mc", MEM_MC,
3164 EXT_MEM_SIZE_GET(size));
3166 if (i & EXT_MEM_ENABLE) {
3167 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3168 add_debugfs_mem(adap, "mc0", MEM_MC0,
3169 EXT_MEM_SIZE_GET(size));
3171 if (i & EXT_MEM1_ENABLE) {
3172 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
3173 add_debugfs_mem(adap, "mc1", MEM_MC1,
3174 EXT_MEM_SIZE_GET(size));
3178 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
3184 * upper-layer driver support
3188 * Allocate an active-open TID and set it to the supplied value.
3190 int cxgb4_alloc_atid(struct tid_info *t, void *data)
3194 spin_lock_bh(&t->atid_lock);
3196 union aopen_entry *p = t->afree;
3198 atid = (p - t->atid_tab) + t->atid_base;
3203 spin_unlock_bh(&t->atid_lock);
3206 EXPORT_SYMBOL(cxgb4_alloc_atid);
3209 * Release an active-open TID.
3211 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3213 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3215 spin_lock_bh(&t->atid_lock);
3219 spin_unlock_bh(&t->atid_lock);
3221 EXPORT_SYMBOL(cxgb4_free_atid);
3224 * Allocate a server TID and set it to the supplied value.
3226 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3230 spin_lock_bh(&t->stid_lock);
3231 if (family == PF_INET) {
3232 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3233 if (stid < t->nstids)
3234 __set_bit(stid, t->stid_bmap);
3238 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
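/* An order-2 region is 2^2 = 4 consecutive bits, matching the four
 * TIDs an IPv6 server consumes (see the stids_in_use accounting
 * below).
 */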
3243 t->stid_tab[stid].data = data;
3244 stid += t->stid_base;
3245 /* IPv6 requires max of 520 bits or 16 cells in TCAM
3246 * This is equivalent to 4 TIDs. With CLIP enabled it
3249 if (family == PF_INET)
3252 t->stids_in_use += 4;
3254 spin_unlock_bh(&t->stid_lock);
3257 EXPORT_SYMBOL(cxgb4_alloc_stid);
3259 /* Allocate a server filter TID and set it to the supplied value.
3261 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3265 spin_lock_bh(&t->stid_lock);
3266 if (family == PF_INET) {
3267 stid = find_next_zero_bit(t->stid_bmap,
3268 t->nstids + t->nsftids, t->nstids);
3269 if (stid < (t->nstids + t->nsftids))
3270 __set_bit(stid, t->stid_bmap);
3277 t->stid_tab[stid].data = data;
3279 stid += t->sftid_base;
3282 spin_unlock_bh(&t->stid_lock);
3285 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3287 /* Release a server TID.
3289 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3291 /* Is it a server filter TID? */
3292 if (t->nsftids && (stid >= t->sftid_base)) {
3293 stid -= t->sftid_base;
3296 stid -= t->stid_base;
3299 spin_lock_bh(&t->stid_lock);
3300 if (family == PF_INET)
3301 __clear_bit(stid, t->stid_bmap);
3303 bitmap_release_region(t->stid_bmap, stid, 2);
3304 t->stid_tab[stid].data = NULL;
3305 if (family == PF_INET)
3308 t->stids_in_use -= 4;
3309 spin_unlock_bh(&t->stid_lock);
3311 EXPORT_SYMBOL(cxgb4_free_stid);
3314 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3316 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3319 struct cpl_tid_release *req;
3321 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3322 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3323 INIT_TP_WR(req, tid);
3324 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3328 * Queue a TID release request and if necessary schedule a work queue to
3331 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3334 void **p = &t->tid_tab[tid];
3335 struct adapter *adap = container_of(t, struct adapter, tids);
3337 spin_lock_bh(&adap->tid_release_lock);
3338 *p = adap->tid_release_head;
3339 /* Low 2 bits encode the Tx channel number */
3340 adap->tid_release_head = (void **)((uintptr_t)p | chan);
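/* This encoding works because tid_tab entries are at least
 * pointer-aligned, so the bottom two bits of &t->tid_tab[tid] are
 * always zero and are free to carry a channel number in 0..3.
 */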
3341 if (!adap->tid_release_task_busy) {
3342 adap->tid_release_task_busy = true;
3343 queue_work(workq, &adap->tid_release_task);
3345 spin_unlock_bh(&adap->tid_release_lock);
3349 * Process the list of pending TID release requests.
3351 static void process_tid_release_list(struct work_struct *work)
3353 struct sk_buff *skb;
3354 struct adapter *adap;
3356 adap = container_of(work, struct adapter, tid_release_task);
3358 spin_lock_bh(&adap->tid_release_lock);
3359 while (adap->tid_release_head) {
3360 void **p = adap->tid_release_head;
3361 unsigned int chan = (uintptr_t)p & 3;
3362 p = (void *)p - chan;
3364 adap->tid_release_head = *p;
3366 spin_unlock_bh(&adap->tid_release_lock);
3368 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3370 schedule_timeout_uninterruptible(1);
3372 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3373 t4_ofld_send(adap, skb);
3374 spin_lock_bh(&adap->tid_release_lock);
3376 adap->tid_release_task_busy = false;
3377 spin_unlock_bh(&adap->tid_release_lock);
3381 * Release a TID and inform HW. If we are unable to allocate the release
3382 * message we defer to a work queue.
3384 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3387 struct sk_buff *skb;
3388 struct adapter *adap = container_of(t, struct adapter, tids);
3390 old = t->tid_tab[tid];
3391 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3393 t->tid_tab[tid] = NULL;
3394 mk_tid_release(skb, chan, tid);
3395 t4_ofld_send(adap, skb);
3397 cxgb4_queue_tid_release(t, chan, tid);
3399 atomic_dec(&t->tids_in_use);
3401 EXPORT_SYMBOL(cxgb4_remove_tid);
3404 * Allocate and initialize the TID tables. Returns 0 on success.
3406 static int tid_init(struct tid_info *t)
3409 unsigned int stid_bmap_size;
3410 unsigned int natids = t->natids;
3411 struct adapter *adap = container_of(t, struct adapter, tids);
3413 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3414 size = t->ntids * sizeof(*t->tid_tab) +
3415 natids * sizeof(*t->atid_tab) +
3416 t->nstids * sizeof(*t->stid_tab) +
3417 t->nsftids * sizeof(*t->stid_tab) +
3418 stid_bmap_size * sizeof(long) +
3419 t->nftids * sizeof(*t->ftid_tab) +
3420 t->nsftids * sizeof(*t->ftid_tab);
3422 t->tid_tab = t4_alloc_mem(size);
3426 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3427 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3428 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3429 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3430 spin_lock_init(&t->stid_lock);
3431 spin_lock_init(&t->atid_lock);
3433 t->stids_in_use = 0;
3435 t->atids_in_use = 0;
3436 atomic_set(&t->tids_in_use, 0);
3438 /* Set up the free list for atid_tab and clear the stid bitmap. */
3441 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3442 t->afree = t->atid_tab;
3444 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3445 /* Reserve stid 0 for T4/T5 adapters */
3446 if (!t->stid_base &&
3447 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3448 __set_bit(0, t->stid_bmap);
3453 int cxgb4_clip_get(const struct net_device *dev,
3454 const struct in6_addr *lip)
3456 struct adapter *adap;
3457 struct fw_clip_cmd c;
3459 adap = netdev2adap(dev);
3460 memset(&c, 0, sizeof(c));
3461 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3462 FW_CMD_REQUEST | FW_CMD_WRITE);
3463 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3464 c.ip_hi = *(__be64 *)(lip->s6_addr);
3465 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3466 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3468 EXPORT_SYMBOL(cxgb4_clip_get);
3470 int cxgb4_clip_release(const struct net_device *dev,
3471 const struct in6_addr *lip)
3473 struct adapter *adap;
3474 struct fw_clip_cmd c;
3476 adap = netdev2adap(dev);
3477 memset(&c, 0, sizeof(c));
3478 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3479 FW_CMD_REQUEST | FW_CMD_READ);
3480 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3481 c.ip_hi = *(__be64 *)(lip->s6_addr);
3482 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3483 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3485 EXPORT_SYMBOL(cxgb4_clip_release);
3488 * cxgb4_create_server - create an IP server
3490 * @stid: the server TID
3491 * @sip: local IP address to bind server to
3492 * @sport: the server's TCP port
3493 * @queue: queue to direct messages from this server to
3495 * Create an IP server for the given port and address.
3496 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3498 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3499 __be32 sip, __be16 sport, __be16 vlan,
3503 struct sk_buff *skb;
3504 struct adapter *adap;
3505 struct cpl_pass_open_req *req;
3508 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3512 adap = netdev2adap(dev);
3513 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3515 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3516 req->local_port = sport;
3517 req->peer_port = htons(0);
3518 req->local_ip = sip;
3519 req->peer_ip = htonl(0);
3520 chan = rxq_to_chan(&adap->sge, queue);
3521 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3522 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3523 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3524 ret = t4_mgmt_tx(adap, skb);
3525 return net_xmit_eval(ret);
3527 EXPORT_SYMBOL(cxgb4_create_server);
3529 /* cxgb4_create_server6 - create an IPv6 server
3531 * @stid: the server TID
3532 * @sip: local IPv6 address to bind server to
3533 * @sport: the server's TCP port
3534 * @queue: queue to direct messages from this server to
3536 * Create an IPv6 server for the given port and address.
3537 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3539 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3540 const struct in6_addr *sip, __be16 sport,
3544 struct sk_buff *skb;
3545 struct adapter *adap;
3546 struct cpl_pass_open_req6 *req;
3549 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3553 adap = netdev2adap(dev);
3554 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3556 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3557 req->local_port = sport;
3558 req->peer_port = htons(0);
3559 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3560 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3561 req->peer_ip_hi = cpu_to_be64(0);
3562 req->peer_ip_lo = cpu_to_be64(0);
3563 chan = rxq_to_chan(&adap->sge, queue);
3564 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3565 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3566 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3567 ret = t4_mgmt_tx(adap, skb);
3568 return net_xmit_eval(ret);
3570 EXPORT_SYMBOL(cxgb4_create_server6);
3572 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3573 unsigned int queue, bool ipv6)
3575 struct sk_buff *skb;
3576 struct adapter *adap;
3577 struct cpl_close_listsvr_req *req;
3580 adap = netdev2adap(dev);
3582 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3586 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3588 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3589 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3590 LISTSVR_IPV6(0)) | QUEUENO(queue));
3591 ret = t4_mgmt_tx(adap, skb);
3592 return net_xmit_eval(ret);
3594 EXPORT_SYMBOL(cxgb4_remove_server);
3597 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3598 * @mtus: the HW MTU table
3599 * @mtu: the target MTU
3600 * @idx: index of selected entry in the MTU table
3602 * Returns the index and the value in the HW MTU table that is closest to
3603 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3604 * table, in which case that smallest available value is selected.
3606 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3611 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3617 EXPORT_SYMBOL(cxgb4_best_mtu);
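/* Illustrative use (hypothetical values): pick the best table entry
 * for a 1500-byte path MTU.
 *
 *	unsigned int idx;
 *	unsigned int mtu = cxgb4_best_mtu(adap->params.mtus, 1500, &idx);
 *
 * mtu is then the largest table value not exceeding 1500 (or the
 * smallest entry if every value is larger), and idx its index.
 */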
3620 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3621 * @mtus: the HW MTU table
3622 * @header_size: Header Size
3623 * @data_size_max: maximum Data Segment Size
3624 * @data_size_align: desired Data Segment Size Alignment (2^N)
3625 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3627 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3628 * MTU Table based solely on a Maximum MTU parameter, we break that
3629 * parameter up into a Header Size and Maximum Data Segment Size, and
3630 * provide a desired Data Segment Size Alignment. If we find an MTU in
3631 * the Hardware MTU Table which will result in a Data Segment Size with
3632 * the requested alignment _and_ that MTU isn't "too far" from the
3633 * closest MTU, then we'll return that rather than the closest MTU.
3635 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3636 unsigned short header_size,
3637 unsigned short data_size_max,
3638 unsigned short data_size_align,
3639 unsigned int *mtu_idxp)
3641 unsigned short max_mtu = header_size + data_size_max;
3642 unsigned short data_size_align_mask = data_size_align - 1;
3643 int mtu_idx, aligned_mtu_idx;
3645 /* Scan the MTU Table till we find an MTU which is larger than our
3646 * Maximum MTU or we reach the end of the table. Along the way,
3647 * record the last MTU found, if any, which will result in a Data
3648 * Segment Length matching the requested alignment.
3650 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3651 unsigned short data_size = mtus[mtu_idx] - header_size;
3653 /* If this MTU minus the Header Size would result in a
3654 * Data Segment Size of the desired alignment, remember it.
3656 if ((data_size & data_size_align_mask) == 0)
3657 aligned_mtu_idx = mtu_idx;
3659 /* If we're not at the end of the Hardware MTU Table and the
3660 * next element is larger than our Maximum MTU, drop out of
3663 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3667 /* If we fell out of the loop because we ran to the end of the table,
3668 * then we just have to use the last [largest] entry.
3670 if (mtu_idx == NMTUS)
3673 /* If we found an MTU which resulted in the requested Data Segment
3674 * Length alignment and that's "not far" from the largest MTU which is
3675 * less than or equal to the maximum MTU, then use that.
3677 if (aligned_mtu_idx >= 0 &&
3678 mtu_idx - aligned_mtu_idx <= 1)
3679 mtu_idx = aligned_mtu_idx;
3681 /* If the caller has passed in an MTU Index pointer, pass the
3682 * MTU Index back. Return the MTU value.
3685 *mtu_idxp = mtu_idx;
3686 return mtus[mtu_idx];
3688 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
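/* Illustrative use (hypothetical values): ask for an MTU whose data
 * segment is a multiple of 512 bytes after a 40-byte TCP/IPv4 header:
 *
 *	unsigned int idx;
 *	unsigned int mtu = cxgb4_best_aligned_mtu(adap->params.mtus,
 *						  40, 8960, 512, &idx);
 *
 * An entry such as 1064 (40 + 2 * 512) would be preferred over a
 * slightly larger neighbour whose payload is not 512-byte aligned.
 */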
3691 * cxgb4_port_chan - get the HW channel of a port
3692 * @dev: the net device for the port
3694 * Return the HW Tx channel of the given port.
3696 unsigned int cxgb4_port_chan(const struct net_device *dev)
3698 return netdev2pinfo(dev)->tx_chan;
3700 EXPORT_SYMBOL(cxgb4_port_chan);
3702 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3704 struct adapter *adap = netdev2adap(dev);
3705 u32 v1, v2, lp_count, hp_count;
3707 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3708 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3709 if (is_t4(adap->params.chip)) {
3710 lp_count = G_LP_COUNT(v1);
3711 hp_count = G_HP_COUNT(v1);
3713 lp_count = G_LP_COUNT_T5(v1);
3714 hp_count = G_HP_COUNT_T5(v2);
3716 return lpfifo ? lp_count : hp_count;
3718 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3721 * cxgb4_port_viid - get the VI id of a port
3722 * @dev: the net device for the port
3724 * Return the VI id of the given port.
3726 unsigned int cxgb4_port_viid(const struct net_device *dev)
3728 return netdev2pinfo(dev)->viid;
3730 EXPORT_SYMBOL(cxgb4_port_viid);
3733 * cxgb4_port_idx - get the index of a port
3734 * @dev: the net device for the port
3736 * Return the index of the given port.
3738 unsigned int cxgb4_port_idx(const struct net_device *dev)
3740 return netdev2pinfo(dev)->port_id;
3742 EXPORT_SYMBOL(cxgb4_port_idx);
3744 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3745 struct tp_tcp_stats *v6)
3747 struct adapter *adap = pci_get_drvdata(pdev);
3749 spin_lock(&adap->stats_lock);
3750 t4_tp_get_tcp_stats(adap, v4, v6);
3751 spin_unlock(&adap->stats_lock);
3753 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3755 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3756 const unsigned int *pgsz_order)
3758 struct adapter *adap = netdev2adap(dev);
3760 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3761 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3762 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3763 HPZ3(pgsz_order[3]));
3765 EXPORT_SYMBOL(cxgb4_iscsi_init);
3767 int cxgb4_flush_eq_cache(struct net_device *dev)
3769 struct adapter *adap = netdev2adap(dev);
3772 ret = t4_fwaddrspace_write(adap, adap->mbox,
3773 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3776 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3778 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3780 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3784 spin_lock(&adap->win0_lock);
3785 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3786 sizeof(indices), (__be32 *)&indices,
3788 spin_unlock(&adap->win0_lock);
3790 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3791 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3796 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3799 struct adapter *adap = netdev2adap(dev);
3800 u16 hw_pidx, hw_cidx;
3803 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3807 if (pidx != hw_pidx) {
3810 if (pidx >= hw_pidx)
3811 delta = pidx - hw_pidx;
3813 delta = size - hw_pidx + pidx;
3815 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3816 QID(qid) | PIDX(delta));
3821 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3823 void cxgb4_disable_db_coalescing(struct net_device *dev)
3825 struct adapter *adap;
3827 adap = netdev2adap(dev);
3828 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3831 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3833 void cxgb4_enable_db_coalescing(struct net_device *dev)
3835 struct adapter *adap;
3837 adap = netdev2adap(dev);
3838 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3840 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3842 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3844 struct adapter *adap;
3845 u32 offset, memtype, memaddr;
3846 u32 edc0_size, edc1_size, mc0_size, mc1_size;
3847 u32 edc0_end, edc1_end, mc0_end, mc1_end;
3850 adap = netdev2adap(dev);
3852 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
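/* The shift assumes the low 8 bits of an STag are its key and the
 * remaining bits index 32-byte TPTE entries, giving a byte offset
 * into the stag region of adapter memory.
 */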
3854 /* Figure out where the offset lands in the Memory Type/Address scheme.
3855 * This code assumes that the memory is laid out starting at offset 0
3856 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3857 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
3858 * MC0, and some have both MC0 and MC1.
3860 edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20;
3861 edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20;
3862 mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20;
3864 edc0_end = edc0_size;
3865 edc1_end = edc0_end + edc1_size;
3866 mc0_end = edc1_end + mc0_size;
3868 if (offset < edc0_end) {
3871 } else if (offset < edc1_end) {
3873 memaddr = offset - edc0_end;
3875 if (offset < mc0_end) {
3877 memaddr = offset - edc1_end;
3878 } else if (is_t4(adap->params.chip)) {
3879 /* T4 only has a single memory channel */
3882 mc1_size = EXT_MEM_SIZE_GET(
3884 MA_EXT_MEMORY1_BAR)) << 20;
3885 mc1_end = mc0_end + mc1_size;
3886 if (offset < mc1_end) {
3888 memaddr = offset - mc0_end;
3890 /* offset beyond the end of any memory */
3896 spin_lock(&adap->win0_lock);
3897 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3898 spin_unlock(&adap->win0_lock);
3902 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3906 EXPORT_SYMBOL(cxgb4_read_tpte);
3908 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3911 struct adapter *adap;
3913 adap = netdev2adap(dev);
3914 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
3915 hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
3917 return ((u64)hi << 32) | (u64)lo;
3919 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3921 static struct pci_driver cxgb4_driver;
3923 static void check_neigh_update(struct neighbour *neigh)
3925 const struct device *parent;
3926 const struct net_device *netdev = neigh->dev;
3928 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3929 netdev = vlan_dev_real_dev(netdev);
3930 parent = netdev->dev.parent;
3931 if (parent && parent->driver == &cxgb4_driver.driver)
3932 t4_l2t_update(dev_get_drvdata(parent), neigh);
3935 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3939 case NETEVENT_NEIGH_UPDATE:
3940 check_neigh_update(data);
3942 case NETEVENT_REDIRECT:
3949 static bool netevent_registered;
3950 static struct notifier_block cxgb4_netevent_nb = {
3951 .notifier_call = netevent_cb
3954 static void drain_db_fifo(struct adapter *adap, int usecs)
3956 u32 v1, v2, lp_count, hp_count;
3959 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3960 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3961 if (is_t4(adap->params.chip)) {
3962 lp_count = G_LP_COUNT(v1);
3963 hp_count = G_HP_COUNT(v1);
3965 lp_count = G_LP_COUNT_T5(v1);
3966 hp_count = G_HP_COUNT_T5(v2);
3969 if (lp_count == 0 && hp_count == 0)
3971 set_current_state(TASK_UNINTERRUPTIBLE);
3972 schedule_timeout(usecs_to_jiffies(usecs));
3976 static void disable_txq_db(struct sge_txq *q)
3978 unsigned long flags;
3980 spin_lock_irqsave(&q->db_lock, flags);
3982 spin_unlock_irqrestore(&q->db_lock, flags);
3985 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3987 spin_lock_irq(&q->db_lock);
3988 if (q->db_pidx_inc) {
3989 /* Make sure that all writes to the TX descriptors
3990 * are committed before we tell HW about them.
3993 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3994 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
3998 spin_unlock_irq(&q->db_lock);
4001 static void disable_dbs(struct adapter *adap)
4005 for_each_ethrxq(&adap->sge, i)
4006 disable_txq_db(&adap->sge.ethtxq[i].q);
4007 for_each_ofldrxq(&adap->sge, i)
4008 disable_txq_db(&adap->sge.ofldtxq[i].q);
4009 for_each_port(adap, i)
4010 disable_txq_db(&adap->sge.ctrlq[i].q);
4013 static void enable_dbs(struct adapter *adap)
4017 for_each_ethrxq(&adap->sge, i)
4018 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
4019 for_each_ofldrxq(&adap->sge, i)
4020 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
4021 for_each_port(adap, i)
4022 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
4025 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
4027 if (adap->uld_handle[CXGB4_ULD_RDMA])
4028 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
4032 static void process_db_full(struct work_struct *work)
4034 struct adapter *adap;
4036 adap = container_of(work, struct adapter, db_full_task);
4038 drain_db_fifo(adap, dbfifo_drain_delay);
4040 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4041 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4042 DBFIFO_HP_INT | DBFIFO_LP_INT,
4043 DBFIFO_HP_INT | DBFIFO_LP_INT);
4046 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
4048 u16 hw_pidx, hw_cidx;
4051 spin_lock_irq(&q->db_lock);
4052 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
4055 if (q->db_pidx != hw_pidx) {
4058 if (q->db_pidx >= hw_pidx)
4059 delta = q->db_pidx - hw_pidx;
4061 delta = q->size - hw_pidx + q->db_pidx;
4063 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4064 QID(q->cntxt_id) | PIDX(delta));
4069 spin_unlock_irq(&q->db_lock);
4071 CH_WARN(adap, "DB drop recovery failed.\n");
4073 static void recover_all_queues(struct adapter *adap)
4077 for_each_ethrxq(&adap->sge, i)
4078 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
4079 for_each_ofldrxq(&adap->sge, i)
4080 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
4081 for_each_port(adap, i)
4082 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
4085 static void process_db_drop(struct work_struct *work)
4087 struct adapter *adap;
4089 adap = container_of(work, struct adapter, db_drop_task);
4091 if (is_t4(adap->params.chip)) {
4092 drain_db_fifo(adap, dbfifo_drain_delay);
4093 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
4094 drain_db_fifo(adap, dbfifo_drain_delay);
4095 recover_all_queues(adap);
4096 drain_db_fifo(adap, dbfifo_drain_delay);
4098 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4100 u32 dropped_db = t4_read_reg(adap, 0x010ac);
4101 u16 qid = (dropped_db >> 15) & 0x1ffff;
4102 u16 pidx_inc = dropped_db & 0x1fff;
4104 unsigned short udb_density;
4105 unsigned long qpshift;
4109 dev_warn(adap->pdev_dev,
4110 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
4112 (dropped_db >> 14) & 1,
4113 (dropped_db >> 13) & 1,
4116 drain_db_fifo(adap, 1);
4118 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
4119 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
4120 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
4121 qpshift = PAGE_SHIFT - ilog2(udb_density);
4122 udb = qid << qpshift;
4124 page = udb / PAGE_SIZE;
4125 udb += (qid - (page * udb_density)) * 128;
4127 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
4129 /* Re-enable BAR2 WC */
4130 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4133 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
4136 void t4_db_full(struct adapter *adap)
4138 if (is_t4(adap->params.chip)) {
4140 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4141 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4142 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
4143 queue_work(workq, &adap->db_full_task);
4147 void t4_db_dropped(struct adapter *adap)
4149 if (is_t4(adap->params.chip)) {
4151 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4153 queue_work(workq, &adap->db_drop_task);
4156 static void uld_attach(struct adapter *adap, unsigned int uld)
4159 struct cxgb4_lld_info lli;
4162 lli.pdev = adap->pdev;
4164 lli.l2t = adap->l2t;
4165 lli.tids = &adap->tids;
4166 lli.ports = adap->port;
4167 lli.vr = &adap->vres;
4168 lli.mtus = adap->params.mtus;
4169 if (uld == CXGB4_ULD_RDMA) {
4170 lli.rxq_ids = adap->sge.rdma_rxq;
4171 lli.ciq_ids = adap->sge.rdma_ciq;
4172 lli.nrxq = adap->sge.rdmaqs;
4173 lli.nciq = adap->sge.rdmaciqs;
4174 } else if (uld == CXGB4_ULD_ISCSI) {
4175 lli.rxq_ids = adap->sge.ofld_rxq;
4176 lli.nrxq = adap->sge.ofldqsets;
4178 lli.ntxq = adap->sge.ofldqsets;
4179 lli.nchan = adap->params.nports;
4180 lli.nports = adap->params.nports;
4181 lli.wr_cred = adap->params.ofldq_wr_cred;
4182 lli.adapter_type = adap->params.chip;
4183 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
4184 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
4185 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
4186 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
4188 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
4189 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
4191 lli.filt_mode = adap->params.tp.vlan_pri_map;
4192 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
4193 for (i = 0; i < NCHAN; i++)
4195 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
4196 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
4197 lli.fw_vers = adap->params.fw_vers;
4198 lli.dbfifo_int_thresh = dbfifo_int_thresh;
4199 lli.sge_ingpadboundary = adap->sge.fl_align;
4200 lli.sge_egrstatuspagesize = adap->sge.stat_len;
4201 lli.sge_pktshift = adap->sge.pktshift;
4202 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
4203 lli.max_ordird_qp = adap->params.max_ordird_qp;
4204 lli.max_ird_adapter = adap->params.max_ird_adapter;
4205 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
4207 handle = ulds[uld].add(&lli);
4208 if (IS_ERR(handle)) {
4209 dev_warn(adap->pdev_dev,
4210 "could not attach to the %s driver, error %ld\n",
4211 uld_str[uld], PTR_ERR(handle));
4215 adap->uld_handle[uld] = handle;
4217 if (!netevent_registered) {
4218 register_netevent_notifier(&cxgb4_netevent_nb);
4219 netevent_registered = true;
4222 if (adap->flags & FULL_INIT_DONE)
4223 ulds[uld].state_change(handle, CXGB4_STATE_UP);
4226 static void attach_ulds(struct adapter *adap)
4230 spin_lock(&adap_rcu_lock);
4231 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
4232 spin_unlock(&adap_rcu_lock);
4234 mutex_lock(&uld_mutex);
4235 list_add_tail(&adap->list_node, &adapter_list);
4236 for (i = 0; i < CXGB4_ULD_MAX; i++)
4238 uld_attach(adap, i);
4239 mutex_unlock(&uld_mutex);
4242 static void detach_ulds(struct adapter *adap)
4246 mutex_lock(&uld_mutex);
4247 list_del(&adap->list_node);
4248 for (i = 0; i < CXGB4_ULD_MAX; i++)
4249 if (adap->uld_handle[i]) {
4250 ulds[i].state_change(adap->uld_handle[i],
4251 CXGB4_STATE_DETACH);
4252 adap->uld_handle[i] = NULL;
4254 if (netevent_registered && list_empty(&adapter_list)) {
4255 unregister_netevent_notifier(&cxgb4_netevent_nb);
4256 netevent_registered = false;
4258 mutex_unlock(&uld_mutex);
4260 spin_lock(&adap_rcu_lock);
4261 list_del_rcu(&adap->rcu_node);
4262 spin_unlock(&adap_rcu_lock);
4265 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4269 mutex_lock(&uld_mutex);
4270 for (i = 0; i < CXGB4_ULD_MAX; i++)
4271 if (adap->uld_handle[i])
4272 ulds[i].state_change(adap->uld_handle[i], new_state);
4273 mutex_unlock(&uld_mutex);
4277 * cxgb4_register_uld - register an upper-layer driver
4278 * @type: the ULD type
4279 * @p: the ULD methods
4281 * Registers an upper-layer driver with this driver and notifies the ULD
4282 * about any presently available devices that support its type. Returns
4283 * %-EBUSY if a ULD of the same type is already registered.
4285 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4288 struct adapter *adap;
4290 if (type >= CXGB4_ULD_MAX)
4292 mutex_lock(&uld_mutex);
4293 if (ulds[type].add) {
4298 list_for_each_entry(adap, &adapter_list, list_node)
4299 uld_attach(adap, type);
4300 out: mutex_unlock(&uld_mutex);
4303 EXPORT_SYMBOL(cxgb4_register_uld);
4306 * cxgb4_unregister_uld - unregister an upper-layer driver
4307 * @type: the ULD type
4309 * Unregisters an existing upper-layer driver.
4311 int cxgb4_unregister_uld(enum cxgb4_uld type)
4313 struct adapter *adap;
4315 if (type >= CXGB4_ULD_MAX)
4317 mutex_lock(&uld_mutex);
4318 list_for_each_entry(adap, &adapter_list, list_node)
4319 adap->uld_handle[type] = NULL;
4320 ulds[type].add = NULL;
4321 mutex_unlock(&uld_mutex);
4324 EXPORT_SYMBOL(cxgb4_unregister_uld);
4326 /* Check whether the netdev on which the event occurred belongs to us. Return
4327 * success (true) if it does, otherwise failure (false).
4328 * Called with rcu_read_lock() held.
4330 static bool cxgb4_netdev(const struct net_device *netdev)
4332 struct adapter *adap;
4335 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4336 for (i = 0; i < MAX_NPORTS; i++)
4337 if (adap->port[i] == netdev)
4342 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4343 unsigned long event)
4345 int ret = NOTIFY_DONE;
4348 if (cxgb4_netdev(event_dev)) {
4351 ret = cxgb4_clip_get(event_dev,
4352 (const struct in6_addr *)ifa->addr.s6_addr);
4360 cxgb4_clip_release(event_dev,
4361 (const struct in6_addr *)ifa->addr.s6_addr);
4372 static int cxgb4_inet6addr_handler(struct notifier_block *this,
4373 unsigned long event, void *data)
4375 struct inet6_ifaddr *ifa = data;
4376 struct net_device *event_dev;
4377 int ret = NOTIFY_DONE;
4378 struct bonding *bond = netdev_priv(ifa->idev->dev);
4379 struct list_head *iter;
4380 struct slave *slave;
4381 struct pci_dev *first_pdev = NULL;
4383 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4384 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4385 ret = clip_add(event_dev, ifa, event);
4386 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4387 /* It is possible that two different adapters are bonded in one
4388 * bond. We need to find each such adapter and add the CLIP
4389 * entry to it exactly once.
4391 read_lock(&bond->lock);
4392 bond_for_each_slave(bond, slave, iter) {
4394 ret = clip_add(slave->dev, ifa, event);
4395 /* Initialize first_pdev only if clip_add() succeeded,
4396 * since success means the device is ours
4398 if (ret == NOTIFY_OK)
4399 first_pdev = to_pci_dev(
4400 slave->dev->dev.parent);
4401 } else if (first_pdev !=
4402 to_pci_dev(slave->dev->dev.parent))
4403 ret = clip_add(slave->dev, ifa, event);
4405 read_unlock(&bond->lock);
4407 ret = clip_add(ifa->idev->dev, ifa, event);
4412 static struct notifier_block cxgb4_inet6addr_notifier = {
4413 .notifier_call = cxgb4_inet6addr_handler
4416 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4417 * a physical device.
4418 * The physical device reference is needed to send the actual CLIP command.
4420 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4422 struct inet6_dev *idev = NULL;
4423 struct inet6_ifaddr *ifa;
4426 idev = __in6_dev_get(root_dev);
4430 read_lock_bh(&idev->lock);
4431 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4432 ret = cxgb4_clip_get(dev,
4433 (const struct in6_addr *)ifa->addr.s6_addr);
4437 read_unlock_bh(&idev->lock);
4442 static int update_root_dev_clip(struct net_device *dev)
4444 struct net_device *root_dev = NULL;
4447 /* First populate the real net device's IPv6 addresses */
4448 ret = update_dev_clip(dev, dev);
4452 /* Parse all bond and vlan devices layered on top of the physical dev */
4453 for (i = 0; i < VLAN_N_VID; i++) {
4454 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4458 ret = update_dev_clip(root_dev, dev);
4465 static void update_clip(const struct adapter *adap)
4468 struct net_device *dev;
4473 for (i = 0; i < MAX_NPORTS; i++) {
4474 dev = adap->port[i];
4478 ret = update_root_dev_clip(dev);
4487 * cxgb_up - enable the adapter
4488 * @adap: adapter being enabled
4490 * Called when the first port is enabled, this function performs the
4491 * actions necessary to make an adapter operational, such as completing
4492 * the initialization of HW modules, and enabling interrupts.
4494 * Must be called with the rtnl lock held.
4496 static int cxgb_up(struct adapter *adap)
4500 err = setup_sge_queues(adap);
4503 err = setup_rss(adap);
4507 if (adap->flags & USING_MSIX) {
4508 name_msix_vecs(adap);
4509 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4510 adap->msix_info[0].desc, adap);
4514 err = request_msix_queue_irqs(adap);
4516 free_irq(adap->msix_info[0].vec, adap);
4520 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4521 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4522 adap->port[0]->name, adap);
4528 t4_intr_enable(adap);
4529 adap->flags |= FULL_INIT_DONE;
4530 notify_ulds(adap, CXGB4_STATE_UP);
4535 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4537 t4_free_sge_resources(adap);
4541 static void cxgb_down(struct adapter *adapter)
4543 t4_intr_disable(adapter);
4544 cancel_work_sync(&adapter->tid_release_task);
4545 cancel_work_sync(&adapter->db_full_task);
4546 cancel_work_sync(&adapter->db_drop_task);
4547 adapter->tid_release_task_busy = false;
4548 adapter->tid_release_head = NULL;
4550 if (adapter->flags & USING_MSIX) {
4551 free_msix_queue_irqs(adapter);
4552 free_irq(adapter->msix_info[0].vec, adapter);
4554 free_irq(adapter->pdev->irq, adapter);
4555 quiesce_rx(adapter);
4556 t4_sge_stop(adapter);
4557 t4_free_sge_resources(adapter);
4558 adapter->flags &= ~FULL_INIT_DONE;
4562 * net_device operations
4564 static int cxgb_open(struct net_device *dev)
4567 struct port_info *pi = netdev_priv(dev);
4568 struct adapter *adapter = pi->adapter;
4570 netif_carrier_off(dev);
4572 if (!(adapter->flags & FULL_INIT_DONE)) {
4573 err = cxgb_up(adapter);
4578 err = link_start(dev);
4580 netif_tx_start_all_queues(dev);
4584 static int cxgb_close(struct net_device *dev)
4586 struct port_info *pi = netdev_priv(dev);
4587 struct adapter *adapter = pi->adapter;
4589 netif_tx_stop_all_queues(dev);
4590 netif_carrier_off(dev);
4591 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4594 /* Return an error number if the indicated filter isn't writable ...
4596 static int writable_filter(struct filter_entry *f)
4606 /* Delete the filter at the specified index (if valid). This checks for all
4607 * the common problems with doing this, like the filter being locked or
4608 * currently pending in another operation, etc.
4610 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4612 struct filter_entry *f;
4615 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4618 f = &adapter->tids.ftid_tab[fidx];
4619 ret = writable_filter(f);
4623 return del_filter_wr(adapter, fidx);
4628 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4629 __be32 sip, __be16 sport, __be16 vlan,
4630 unsigned int queue, unsigned char port, unsigned char mask)
4633 struct filter_entry *f;
4634 struct adapter *adap;
4638 adap = netdev2adap(dev);
4640 /* Adjust stid to correct filter index */
4641 stid -= adap->tids.sftid_base;
4642 stid += adap->tids.nftids;
4644 /* Check to make sure the filter requested is writable ...
4646 f = &adap->tids.ftid_tab[stid];
4647 ret = writable_filter(f);
4651 /* Clear out any old resources being used by the filter before
4652 * we start constructing the new filter.
4655 clear_filter(adap, f);
4657 /* Clear out filter specifications */
4658 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4659 f->fs.val.lport = cpu_to_be16(sport);
4660 f->fs.mask.lport = ~0;
4662 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4663 for (i = 0; i < 4; i++) {
4664 f->fs.val.lip[i] = val[i];
4665 f->fs.mask.lip[i] = ~0;
4667 if (adap->params.tp.vlan_pri_map & F_PORT) {
4668 f->fs.val.iport = port;
4669 f->fs.mask.iport = mask;
4673 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4674 f->fs.val.proto = IPPROTO_TCP;
4675 f->fs.mask.proto = ~0;
4680 /* Mark filter as locked */
4684 ret = set_filter_wr(adap, stid);
4686 clear_filter(adap, f);
4692 EXPORT_SYMBOL(cxgb4_create_server_filter);
4694 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4695 unsigned int queue, bool ipv6)
4698 struct filter_entry *f;
4699 struct adapter *adap;
4701 adap = netdev2adap(dev);
4703 /* Adjust stid to correct filter index */
4704 stid -= adap->tids.sftid_base;
4705 stid += adap->tids.nftids;
4707 f = &adap->tids.ftid_tab[stid];
4708 /* Unlock the filter */
4711 ret = delete_filter(adap, stid);
4717 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4719 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4720 struct rtnl_link_stats64 *ns)
4722 struct port_stats stats;
4723 struct port_info *p = netdev_priv(dev);
4724 struct adapter *adapter = p->adapter;
4726 /* Block retrieving statistics during EEH error
4727 * recovery. Otherwise, the recovery might fail
4728 * and the PCI device will be removed permanently
4730 spin_lock(&adapter->stats_lock);
4731 if (!netif_device_present(dev)) {
4732 spin_unlock(&adapter->stats_lock);
4735 t4_get_port_stats(adapter, p->tx_chan, &stats);
4736 spin_unlock(&adapter->stats_lock);
4738 ns->tx_bytes = stats.tx_octets;
4739 ns->tx_packets = stats.tx_frames;
4740 ns->rx_bytes = stats.rx_octets;
4741 ns->rx_packets = stats.rx_frames;
4742 ns->multicast = stats.rx_mcast_frames;
4744 /* detailed rx_errors */
4745 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4747 ns->rx_over_errors = 0;
4748 ns->rx_crc_errors = stats.rx_fcs_err;
4749 ns->rx_frame_errors = stats.rx_symbol_err;
4750 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4751 stats.rx_ovflow2 + stats.rx_ovflow3 +
4752 stats.rx_trunc0 + stats.rx_trunc1 +
4753 stats.rx_trunc2 + stats.rx_trunc3;
4754 ns->rx_missed_errors = 0;
4756 /* detailed tx_errors */
4757 ns->tx_aborted_errors = 0;
4758 ns->tx_carrier_errors = 0;
4759 ns->tx_fifo_errors = 0;
4760 ns->tx_heartbeat_errors = 0;
4761 ns->tx_window_errors = 0;
4763 ns->tx_errors = stats.tx_error_frames;
4764 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4765 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4769 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4772 int ret = 0, prtad, devad;
4773 struct port_info *pi = netdev_priv(dev);
4774 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4778 if (pi->mdio_addr < 0)
4780 data->phy_id = pi->mdio_addr;
4784 if (mdio_phy_id_is_c45(data->phy_id)) {
4785 prtad = mdio_phy_id_prtad(data->phy_id);
4786 devad = mdio_phy_id_devad(data->phy_id);
4787 } else if (data->phy_id < 32) {
4788 prtad = data->phy_id;
4790 data->reg_num &= 0x1f;
4794 mbox = pi->adapter->fn;
4795 if (cmd == SIOCGMIIREG)
4796 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4797 data->reg_num, &data->val_out);
4799 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4800 data->reg_num, data->val_in);
4808 static void cxgb_set_rxmode(struct net_device *dev)
4810 /* unfortunately we can't return errors to the stack */
4811 set_rxmode(dev, -1, false);
4814 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4817 struct port_info *pi = netdev_priv(dev);
4819 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4821 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4828 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4831 struct sockaddr *addr = p;
4832 struct port_info *pi = netdev_priv(dev);
4834 if (!is_valid_ether_addr(addr->sa_data))
4835 return -EADDRNOTAVAIL;
4837 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4838 pi->xact_addr_filt, addr->sa_data, true, true);
4842 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4843 pi->xact_addr_filt = ret;
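/* On success t4_change_mac() returns the index of the exact-match
 * MPS filter now holding the address; remembering it lets the next
 * address change replace the same entry.
 */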
4847 #ifdef CONFIG_NET_POLL_CONTROLLER
4848 static void cxgb_netpoll(struct net_device *dev)
4850 struct port_info *pi = netdev_priv(dev);
4851 struct adapter *adap = pi->adapter;
4853 if (adap->flags & USING_MSIX) {
4855 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4857 for (i = pi->nqsets; i; i--, rx++)
4858 t4_sge_intr_msix(0, &rx->rspq);
4860 t4_intr_handler(adap)(0, adap);
4864 static const struct net_device_ops cxgb4_netdev_ops = {
4865 .ndo_open = cxgb_open,
4866 .ndo_stop = cxgb_close,
4867 .ndo_start_xmit = t4_eth_xmit,
4868 .ndo_select_queue = cxgb_select_queue,
4869 .ndo_get_stats64 = cxgb_get_stats,
4870 .ndo_set_rx_mode = cxgb_set_rxmode,
4871 .ndo_set_mac_address = cxgb_set_mac_addr,
4872 .ndo_set_features = cxgb_set_features,
4873 .ndo_validate_addr = eth_validate_addr,
4874 .ndo_do_ioctl = cxgb_ioctl,
4875 .ndo_change_mtu = cxgb_change_mtu,
4876 #ifdef CONFIG_NET_POLL_CONTROLLER
4877 .ndo_poll_controller = cxgb_netpoll,
4881 void t4_fatal_err(struct adapter *adap)
4883 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4884 t4_intr_disable(adap);
4885 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4888 /* Return the specified PCI-E Configuration Space register from our Physical
4889 * Function. We try first via a Firmware LDST Command since we prefer to let
4890 * the firmware own all of these registers, but if that fails we go for it
4891 * directly ourselves.
4893 static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
4895 struct fw_ldst_cmd ldst_cmd;
4899 /* Construct and send the Firmware LDST Command to retrieve the
4900 * specified PCI-E Configuration Space register.
4902 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
4903 ldst_cmd.op_to_addrspace =
4904 htonl(FW_CMD_OP(FW_LDST_CMD) |
4907 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
4908 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
4909 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
4910 ldst_cmd.u.pcie.ctrl_to_fn =
4911 (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
4912 ldst_cmd.u.pcie.r = reg;
4913 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
4916 /* If the LDST Command succeeded, extract the returned register
4917 * value. Otherwise read it directly ourselves.
4920 val = ntohl(ldst_cmd.u.pcie.data[0]);
4922 t4_hw_pci_read_cfg4(adap, reg, &val);
4927 static void setup_memwin(struct adapter *adap)
4929 u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
4931 if (is_t4(adap->params.chip)) {
4934 /* Truncation intentional: we only read the bottom 32-bits of
4935 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
4936 * mechanism to read BAR0 instead of using
4937 * pci_resource_start() because we could be operating from
4938 * within a Virtual Machine which is trapping our accesses to
4939 * our Configuration Space and we need to set up the PCI-E
4940 * Memory Window decoders with the actual addresses which will
4941 * be coming across the PCI-E link.
4943 bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
4944 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
4945 adap->t4_bar0 = bar0;
4947 mem_win0_base = bar0 + MEMWIN0_BASE;
4948 mem_win1_base = bar0 + MEMWIN1_BASE;
4949 mem_win2_base = bar0 + MEMWIN2_BASE;
4950 mem_win2_aperture = MEMWIN2_APERTURE;
4952 /* For T5, only the relative offset inside the PCIe BAR is passed */
4953 mem_win0_base = MEMWIN0_BASE;
4954 mem_win1_base = MEMWIN1_BASE;
4955 mem_win2_base = MEMWIN2_BASE_T5;
4956 mem_win2_aperture = MEMWIN2_APERTURE_T5;
4958 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4959 mem_win0_base | BIR(0) |
4960 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4961 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4962 mem_win1_base | BIR(0) |
4963 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4964 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4965 mem_win2_base | BIR(0) |
4966 WINDOW(ilog2(mem_win2_aperture) - 10));
4967 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
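/* The WINDOW() field encodes the aperture as log2(size in bytes) - 10,
 * so e.g. a 64KB window encodes as 16 - 10 = 6. The final read back
 * flushes the posted writes so the windows are usable immediately.
 */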
4970 static void setup_memwin_rdma(struct adapter *adap)
4972 if (adap->vres.ocq.size) {
4976 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
4977 start &= PCI_BASE_ADDRESS_MEM_MASK;
4978 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4979 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4981 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4982 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4984 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4985 adap->vres.ocq.start);
4987 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4991 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4996 /* get device capabilities */
4997 memset(c, 0, sizeof(*c));
4998 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4999 FW_CMD_REQUEST | FW_CMD_READ);
5000 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
5001 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
5005 /* select capabilities we'll be using */
5006 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5008 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5010 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5011 } else if (vf_acls) {
5012 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
5015 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5016 FW_CMD_REQUEST | FW_CMD_WRITE);
5017 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
5021 ret = t4_config_glbl_rss(adap, adap->fn,
5022 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5023 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5024 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
5028 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
5029 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
5035 /* tweak some settings */
5036 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
5037 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
5038 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
5039 v = t4_read_reg(adap, TP_PIO_DATA);
5040 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
5042 /* first 4 Tx modulation queues point to consecutive Tx channels */
5043 adap->params.tp.tx_modq_map = 0xE4;
5044 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
5045 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_HDR);
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_FIFO);
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_PCMD);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->fn);
}
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration.
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL,
			 PKTSHIFT_MASK,
			 PKTSHIFT(rx_dma_offset));
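	/* A 2-byte packet shift means that after the 14-byte Ethernet
	 * header the IP header lands on a 4-byte boundary in the Rx
	 * buffer, the usual NET_IP_ALIGN rationale.
	 */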
	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
			       CSUM_HAS_PSEUDO_HDR, 0);

	return 0;
}
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}
	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->fn, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
				maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data,
						   T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}
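		/* Example with a hypothetical 1031-byte Configuration File:
		 * the first 1028 bytes (1031 & ~3) go out in the aligned
		 * write above and the 3 residual bytes (1031 & 3) are
		 * zero-padded into one final 4-byte write.
		 */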
		release_firmware(cf);
		if (ret)
			goto bye;
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST |
			      FW_CMD_READ);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;
	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);
	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_WRITE);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;
	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/*
	 * Return successfully and note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
	adapter->flags |= USING_SOFT_PARAMS;
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "
		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;
	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, ret);
	return ret;
}
/*
 * Attempt to initialize the adapter via hard-coded, driver supplied
 * parameters ...
 */
static int adap_init0_no_config(struct adapter *adapter, int reset)
{
	struct sge *s = &adapter->sge;
	struct fw_caps_config_cmd caps_cmd;
	u32 v;
	int i, ret;

	/*
	 * Reset device if necessary
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}
	/*
	 * Get device capabilities and select which we'll be using.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
		goto bye;
	}

	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;
	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * Select RSS Global Mode we want to use.  We use "Basic Virtual"
	 * mode which maps each Virtual Interface to its own section of
	 * the RSS Table and we turn on all map and hash enables ...
	 */
	adapter->flags |= RSS_TNLALLLOOKUP;
	ret = t4_config_glbl_rss(adapter, adapter->mbox,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
				 ((adapter->flags & RSS_TNLALLLOOKUP) ?
					FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
	if (ret < 0)
		goto bye;
	/*
	 * Set up our own fundamental resource provisioning ...
	 */
	ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
			  PFRES_NEQ, PFRES_NETHCTRL,
			  PFRES_NIQFLINT, PFRES_NIQ,
			  PFRES_TC, PFRES_NVI,
			  FW_PFVF_CMD_CMASK_MASK,
			  pfvfres_pmask(adapter, adapter->fn, 0),
			  PFRES_NEXACTF,
			  PFRES_R_CAPS, PFRES_WX_CAPS);
	if (ret < 0)
		goto bye;
	/*
	 * Perform low level SGE initialization.  We need to do this before we
	 * send the firmware the INITIALIZE command because that will cause
	 * any other PF Drivers which are waiting for the Master
	 * Initialization to proceed forward.
	 */
	for (i = 0; i < SGE_NTIMERS - 1; i++)
		s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
	s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
	s->counter_val[0] = 1;
	for (i = 1; i < SGE_NCOUNTERS; i++)
		s->counter_val[i] = min(intr_cnt[i - 1],
					THRESHOLD_0_GET(THRESHOLD_0_MASK));
	t4_sge_init(adapter);
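	/* timer_val[] and counter_val[] are the menus of interrupt holdoff
	 * timers (in microseconds) and packet-count thresholds that
	 * individual response queues later select from; counter_val[0] = 1
	 * keeps an "interrupt per packet" setting available.
	 */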
#ifdef CONFIG_PCI_IOV
	/*
	 * Provision resource limits for Virtual Functions.  We currently
	 * grant them all the same static resource limits except for the Port
	 * Access Rights Mask which we're assigning based on the PF.  All of
	 * the static provisioning stuff for both the PF and VF really needs
	 * to be managed in a persistent manner for each device which the
	 * firmware controls.
	 */
	{
		int pf, vf;

		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
			if (num_vf[pf] <= 0)
				continue;

			/* VF numbering starts at 1! */
			for (vf = 1; vf <= num_vf[pf]; vf++) {
				ret = t4_cfg_pfvf(adapter, adapter->mbox,
						  pf, vf,
						  VFRES_NEQ, VFRES_NETHCTRL,
						  VFRES_NIQFLINT, VFRES_NIQ,
						  VFRES_TC, VFRES_NVI,
						  FW_PFVF_CMD_CMASK_MASK,
						  pfvfres_pmask(adapter, pf, vf),
						  VFRES_NEXACTF,
						  VFRES_R_CAPS, VFRES_WX_CAPS);
				if (ret < 0)
					dev_warn(adapter->pdev_dev,
						 "failed to "
						 "provision pf/vf=%d/%d; "
						 "err=%d\n", pf, vf, ret);
			}
		}
	}
#endif
	/*
	 * Set up the default filter mode.  Later we'll want to implement this
	 * via a firmware command, etc. ...  This needs to be done before the
	 * firmware initialization command ...  If the selected set of fields
	 * isn't equal to the default value, we'll need to make sure that the
	 * field selections will fit in the 36-bit budget.
	 */
	if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
		int j, bits = 0;

		for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
			switch (tp_vlan_pri_map & (1 << j)) {
			case 0:
				/* compressed filter field not enabled */
				break;
			/* ... the other compressed filter fields (FCOE,
			 * PORT, VNIC_ID, VLAN, TOS, PROTOCOL, MACMATCH)
			 * each add their respective field width here ...
			 */
			case ETHERTYPE_MASK:
				bits += 16;
				break;
			case MPSHITTYPE_MASK:
				bits += 3;
				break;
			case FRAGMENTATION_MASK:
				bits += 1;
				break;
			}

		if (bits > 36) {
			dev_err(adapter->pdev_dev,
				"tp_vlan_pri_map=%#x needs %d bits > 36; using %#x\n",
				tp_vlan_pri_map, bits,
				TP_VLAN_PRI_MAP_DEFAULT);
			tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
		}
	}
	v = tp_vlan_pri_map;
	t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, TP_VLAN_PRI_MAP);
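	/* Worked example, assuming the usual T4 field widths (VLAN 17 bits,
	 * PORT 3, PROTOCOL 8): enabling those three needs 28 bits and fits
	 * the 36-bit budget; adding ETHERTYPE (16) would need 44 bits and
	 * the selection would fall back to TP_VLAN_PRI_MAP_DEFAULT.
	 */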
	/*
	 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in
	 * order to support any of the compressed filter fields above.
	 * Newer versions of the firmware do this automatically but it
	 * doesn't hurt to set it here.  Meanwhile, we do _not_ need to set
	 * Lookup Every Packet in TP_INGRESS_CONFIG to support matching
	 * non-TCP packets since the firmware automatically turns this on
	 * and off when we have a non-zero number of filters active (since
	 * it does have a performance impact).
	 */
	if (tp_vlan_pri_map)
		t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
				 FIVETUPLELOOKUP_MASK,
				 FIVETUPLELOOKUP_MASK);
	/*
	 * Tweak some settings.
	 */
	t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
		     RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
		     PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
		     KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));

	/*
	 * Get basic stuff going by issuing the Firmware Initialize command.
	 * Note that this _must_ be after all PFVF commands ...
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/*
	 * Return successfully!
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using built-in "
		 "driver parameters\n");
	return 0;
	/*
	 * Something bad happened.  Return the error ...
	 */
bye:
	return ret;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};
static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/*
	 * Contact FW, advertising Master capability (and potentially forcing
	 * ourselves as the Master PF if our module parameter force_init is
	 * set).
	 */
	ret = t4_fw_hello(adap, adap->mbox, adap->fn,
			  force_init ? MASTER_MUST : MASTER_MAY,
			  &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;
	if (force_init && state == DEV_STATE_INIT)
		state = DEV_STATE_UNINIT;
	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	t4_get_fw_version(adap, &adap->params.fw_vers);
	t4_get_tp_version(adap, &adap->params.tp_vers);
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = t4_alloc_mem(sizeof(*card_fw));

		/* Get FW from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		if (fw != NULL)
			release_firmware(fw);
		t4_free_mem(card_fw);

		if (ret < 0)
			goto bye;
	}
	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;
	/*
	 * If the firmware is initialized already (and we're not forcing a
	 * master initialization), note that we're living with existing
	 * adapter parameters.  Otherwise, it's time to try initializing the
	 * adapter ...
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
		adap->flags |= USING_SOFT_PARAMS;
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "
			 "Initializing adapter\n");

		/*
		 * If the firmware doesn't support Configuration
		 * Files, warn the user and exit.
		 */
		if (ret < 0)
			dev_warn(adap->pdev_dev, "Firmware doesn't support "
				 "configuration file.\n");
		if (force_old_init)
			ret = adap_init0_no_config(adap, reset);
		else {
			/*
			 * Find out whether we're dealing with a version of
			 * the firmware which has configuration file support.
			 */
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
					      params, val);

			/*
			 * If the firmware doesn't support Configuration
			 * Files, use the old Driver-based, hard-wired
			 * initialization.  Otherwise, try using the
			 * Configuration File support and fall back to the
			 * Driver-based initialization if there's no
			 * Configuration File found.
			 */
			if (ret < 0)
				ret = adap_init0_no_config(adap, reset);
			else {
				/*
				 * The firmware provides us with a memory
				 * buffer where we can load a Configuration
				 * File from the host if we want to override
				 * the Configuration File in flash.
				 */
				ret = adap_init0_config(adap, reset);
				if (ret == -ENOENT) {
					dev_info(adap->pdev_dev,
						 "No Configuration File present "
						 "on adapter.  Using hard-wired "
						 "configuration parameters.\n");
					ret = adap_init0_no_config(adap, reset);
				}
			}
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"could not initialize adapter, error %d\n",
				-ret);
			goto bye;
		}
	}
	/*
	 * If we're living with non-hard-coded parameters (either from a
	 * Firmware Configuration File or values programmed by a different PF
	 * Driver), give the SGE code a chance to pull in anything that it
	 * needs ...  Note that this must be called after we retrieve our VPD
	 * parameters in order to know how to convert core ticks to seconds.
	 */
	if (adap->flags & USING_SOFT_PARAMS) {
		ret = t4_sge_init(adap);
		if (ret < 0)
			goto bye;
	}

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;
	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
	FW_PARAMS_PARAM_Y(0) | \
	FW_PARAMS_PARAM_Z(0)

	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];
	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connections through firmware work requests.
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}
	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}
	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;
	if (caps_cmd.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Setup server filter region.  Divide the available filter
		 * region into two parts.  Regular filters get 1/3rd and
		 * server filters get 2/3rd.  This is only enabled if the
		 * workaround path is enabled.
		 * 1. For regular filters.
		 * 2. Server filters: these are special filters which are
		 *    used to redirect SYN packets to the offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
				DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
				DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
				adap->tids.ftid_base;
		}
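		/* Example: with nftids = 100, DIV_ROUND_UP(100, 3) = 34, so
		 * regular filters keep 34 TIDs starting at ftid_base and
		 * server filters take the remaining 66.
		 */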
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
				      params, val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_tp_params(adap);
	adap->flags |= FW_OK;
	return 0;
	/*
	 * Something bad happened.  If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
				"device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset = eeh_slot_reset,
	.resume = eeh_resume,
};
static inline bool is_x_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}

static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
			     unsigned int us, unsigned int cnt,
			     unsigned int size, unsigned int iqe_size)
{
	q->adap = adap;
	set_rspq_intr_params(q, us, cnt);
	q->iqe_len = iqe_size;
	q->size = size;
}
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif
	int ciq_size;

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */
	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
		s->rdmaciqs = adap->params.nports;
	}
	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		CH_WARN(adap, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
		struct sge_ofld_rxq *r = &s->rdmaciq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
	}
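	/* init_rspq()'s middle arguments are (us, cnt): the firmware event
	 * queue and the forwarded-interrupt queue below use a 0 us holdoff
	 * timer with a count threshold of 1 so their events are delivered
	 * with minimal latency.
	 */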
	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 3 * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
	want = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (want < 0)
		return want;

	/*
	 * Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
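	/* Hypothetical example: a 2-port offload adapter with
	 * max_ethqsets = 16 asks for want = 16 + 2 + (2 + 2 + ofldqsets)
	 * vectors but needs only 2 + 2 + 6.  If the kernel grants fewer
	 * than "want", the Ethernet queue sets are trimmed first while the
	 * offload groups keep their nchan-sized minimums.
	 */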
	i = want - EXTRA_VECS - ofld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_offload(adap)) {
		i = want - EXTRA_VECS - s->max_ethqsets;
		i -= ofld_need - nchan;
		s->ofldqsets = (i / nchan) * nchan;	/* round down */
	}
	for (i = 0; i < want; ++i)
		adap->msix_info[i].vec = entries[i].vector;

	return 0;
}
static int init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
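		/* ethtool_rxfh_indir_default(j, n) is just j % n, so the
		 * initial RSS indirection table spreads flows round-robin
		 * across this port's queue sets.
		 */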
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}
static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, P/N: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.pn);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}
	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* We control everything through one PF */
	func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI));
	if (func != ent->driver_data) {
		pci_save_state(pdev);        /* to restore SR-IOV later */
		goto sriov;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);
	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar0;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B.  Write coalescing is enabled only
		 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF value for this PF
		 * does not exceed the number of segments that can be
		 * accommodated in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_unmap_bar0;
		}
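		/* With 4KB pages, num_seg = 4096 / 128 = 32, so the check
		 * above rejects configurations packing more than 32 egress
		 * queues into a page.
		 */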
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_unmap_bar0;
		}
	}
	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;
	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		netdev->ethtool_ops = &cxgb_ethtool_ops;
	}
	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);
	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;
	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}
	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;
 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_unmap_bar0:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = remove_one,
	.err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
{
	int ret;

	workq = create_singlethread_workqueue("cxgb4");
	if (!workq)
		return -ENOMEM;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0) {
		debugfs_remove(cxgb4_debugfs_root);
		destroy_workqueue(workq);
	}

	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);

	return ret;
}
static void __exit cxgb4_cleanup_module(void)
{
	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
	flush_workqueue(workq);
	destroy_workqueue(workq);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);