2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <net/addrconf.h>
64 #include <asm/uaccess.h>
70 #include "cxgb4_dcb.h"
73 #include <../drivers/net/bonding/bonding.h>
78 #define DRV_VERSION "2.0.0-ko"
79 #define DRV_DESC "Chelsio T4/T5 Network Driver"
82 * Max interrupt hold-off timer value in us. Queues fall back to this value
83 * under extreme memory pressure so it's largish to give the system time to
86 #define MAX_SGE_TIMERVAL 200U
90 * Physical Function provisioning constants.
92 PFRES_NVI = 4, /* # of Virtual Interfaces */
93 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
94 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
96 PFRES_NEQ = 256, /* # of egress queues */
97 PFRES_NIQ = 0, /* # of ingress queues */
98 PFRES_TC = 0, /* PCI-E traffic class */
99 PFRES_NEXACTF = 128, /* # of exact MPS filters */
101 PFRES_R_CAPS = FW_CMD_CAP_PF,
102 PFRES_WX_CAPS = FW_CMD_CAP_PF,
104 #ifdef CONFIG_PCI_IOV
106 * Virtual Function provisioning constants. We need two extra Ingress
107 * Queues with Interrupt capability to serve as the VF's Firmware
108 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
109 * neither will have Free Lists associated with them). For each
110 * Ethernet/Control Egress Queue and for each Free List, we need an
113 VFRES_NPORTS = 1, /* # of "ports" per VF */
114 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
116 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
117 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
118 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
119 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
120 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
121 VFRES_TC = 0, /* PCI-E traffic class */
122 VFRES_NEXACTF = 16, /* # of exact MPS filters */
124 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
125 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
130 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
131 * static and likely not to be useful in the long run. We really need to
132 * implement some form of persistent configuration which the firmware
135 static unsigned int pfvfres_pmask(struct adapter *adapter,
136 unsigned int pf, unsigned int vf)
138 unsigned int portn, portvec;
141 * Give PF's access to all of the ports.
144 return FW_PFVF_CMD_PMASK_MASK;
147 * For VFs, we'll assign them access to the ports based purely on the
148 * PF. We assign active ports in order, wrapping around if there are
149 * fewer active ports than PFs: e.g. active port[pf % nports].
150 * Unfortunately the adapter's port_info structs haven't been
151 * initialized yet so we have to compute this.
153 if (adapter->params.nports == 0)
156 portn = pf % adapter->params.nports;
157 portvec = adapter->params.portvec;
160 * Isolate the lowest set bit in the port vector. If we're at
161 * the port number that we want, return that as the pmask.
162 * otherwise mask that bit out of the port vector and
163 * decrement our port number ...
165 unsigned int pmask = portvec ^ (portvec & (portvec-1));
175 MAX_TXQ_ENTRIES = 16384,
176 MAX_CTRL_TXQ_ENTRIES = 1024,
177 MAX_RSPQ_ENTRIES = 16384,
178 MAX_RX_BUFFERS = 16384,
179 MIN_TXQ_ENTRIES = 32,
180 MIN_CTRL_TXQ_ENTRIES = 32,
181 MIN_RSPQ_ENTRIES = 128,
185 /* Host shadow copy of ingress filter entry. This is in host native format
186 * and doesn't match the ordering or bit order, etc. of the hardware of the
187 * firmware command. The use of bit-field structure elements is purely to
188 * remind ourselves of the field size limitations and save memory in the case
189 * where the filter table is large.
191 struct filter_entry {
192 /* Administrative fields for filter.
194 u32 valid:1; /* filter allocated and valid */
195 u32 locked:1; /* filter is administratively locked */
197 u32 pending:1; /* filter action is pending firmware reply */
198 u32 smtidx:8; /* Source MAC Table index for smac */
199 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
201 /* The filter itself. Most of this is a straight copy of information
202 * provided by the extended ioctl(). Some fields are translated to
203 * internal forms -- for instance the Ingress Queue ID passed in from
204 * the ioctl() is translated into the Absolute Ingress Queue ID.
206 struct ch_filter_specification fs;
209 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
210 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
211 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
213 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
215 static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
216 CH_DEVICE(0xa000, 0), /* PE10K */
217 CH_DEVICE(0x4001, -1),
218 CH_DEVICE(0x4002, -1),
219 CH_DEVICE(0x4003, -1),
220 CH_DEVICE(0x4004, -1),
221 CH_DEVICE(0x4005, -1),
222 CH_DEVICE(0x4006, -1),
223 CH_DEVICE(0x4007, -1),
224 CH_DEVICE(0x4008, -1),
225 CH_DEVICE(0x4009, -1),
226 CH_DEVICE(0x400a, -1),
227 CH_DEVICE(0x4401, 4),
228 CH_DEVICE(0x4402, 4),
229 CH_DEVICE(0x4403, 4),
230 CH_DEVICE(0x4404, 4),
231 CH_DEVICE(0x4405, 4),
232 CH_DEVICE(0x4406, 4),
233 CH_DEVICE(0x4407, 4),
234 CH_DEVICE(0x4408, 4),
235 CH_DEVICE(0x4409, 4),
236 CH_DEVICE(0x440a, 4),
237 CH_DEVICE(0x440d, 4),
238 CH_DEVICE(0x440e, 4),
239 CH_DEVICE(0x5001, 4),
240 CH_DEVICE(0x5002, 4),
241 CH_DEVICE(0x5003, 4),
242 CH_DEVICE(0x5004, 4),
243 CH_DEVICE(0x5005, 4),
244 CH_DEVICE(0x5006, 4),
245 CH_DEVICE(0x5007, 4),
246 CH_DEVICE(0x5008, 4),
247 CH_DEVICE(0x5009, 4),
248 CH_DEVICE(0x500A, 4),
249 CH_DEVICE(0x500B, 4),
250 CH_DEVICE(0x500C, 4),
251 CH_DEVICE(0x500D, 4),
252 CH_DEVICE(0x500E, 4),
253 CH_DEVICE(0x500F, 4),
254 CH_DEVICE(0x5010, 4),
255 CH_DEVICE(0x5011, 4),
256 CH_DEVICE(0x5012, 4),
257 CH_DEVICE(0x5013, 4),
258 CH_DEVICE(0x5014, 4),
259 CH_DEVICE(0x5015, 4),
260 CH_DEVICE(0x5080, 4),
261 CH_DEVICE(0x5081, 4),
262 CH_DEVICE(0x5082, 4),
263 CH_DEVICE(0x5083, 4),
264 CH_DEVICE(0x5084, 4),
265 CH_DEVICE(0x5085, 4),
266 CH_DEVICE(0x5401, 4),
267 CH_DEVICE(0x5402, 4),
268 CH_DEVICE(0x5403, 4),
269 CH_DEVICE(0x5404, 4),
270 CH_DEVICE(0x5405, 4),
271 CH_DEVICE(0x5406, 4),
272 CH_DEVICE(0x5407, 4),
273 CH_DEVICE(0x5408, 4),
274 CH_DEVICE(0x5409, 4),
275 CH_DEVICE(0x540A, 4),
276 CH_DEVICE(0x540B, 4),
277 CH_DEVICE(0x540C, 4),
278 CH_DEVICE(0x540D, 4),
279 CH_DEVICE(0x540E, 4),
280 CH_DEVICE(0x540F, 4),
281 CH_DEVICE(0x5410, 4),
282 CH_DEVICE(0x5411, 4),
283 CH_DEVICE(0x5412, 4),
284 CH_DEVICE(0x5413, 4),
285 CH_DEVICE(0x5414, 4),
286 CH_DEVICE(0x5415, 4),
287 CH_DEVICE(0x5480, 4),
288 CH_DEVICE(0x5481, 4),
289 CH_DEVICE(0x5482, 4),
290 CH_DEVICE(0x5483, 4),
291 CH_DEVICE(0x5484, 4),
292 CH_DEVICE(0x5485, 4),
296 #define FW4_FNAME "cxgb4/t4fw.bin"
297 #define FW5_FNAME "cxgb4/t5fw.bin"
298 #define FW4_CFNAME "cxgb4/t4-config.txt"
299 #define FW5_CFNAME "cxgb4/t5-config.txt"
301 MODULE_DESCRIPTION(DRV_DESC);
302 MODULE_AUTHOR("Chelsio Communications");
303 MODULE_LICENSE("Dual BSD/GPL");
304 MODULE_VERSION(DRV_VERSION);
305 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
306 MODULE_FIRMWARE(FW4_FNAME);
307 MODULE_FIRMWARE(FW5_FNAME);
310 * Normally we're willing to become the firmware's Master PF but will be happy
311 * if another PF has already become the Master and initialized the adapter.
312 * Setting "force_init" will cause this driver to forcibly establish itself as
313 * the Master PF and initialize the adapter.
315 static uint force_init;
317 module_param(force_init, uint, 0644);
318 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
321 * Normally if the firmware we connect to has Configuration File support, we
322 * use that and only fall back to the old Driver-based initialization if the
323 * Configuration File fails for some reason. If force_old_init is set, then
324 * we'll always use the old Driver-based initialization sequence.
326 static uint force_old_init;
328 module_param(force_old_init, uint, 0644);
329 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
331 static int dflt_msg_enable = DFLT_MSG_ENABLE;
333 module_param(dflt_msg_enable, int, 0644);
334 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
337 * The driver uses the best interrupt scheme available on a platform in the
338 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
339 * of these schemes the driver may consider as follows:
341 * msi = 2: choose from among all three options
342 * msi = 1: only consider MSI and INTx interrupts
343 * msi = 0: force INTx interrupts
347 module_param(msi, int, 0644);
348 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
351 * Queue interrupt hold-off timer values. Queues default to the first of these
354 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
356 module_param_array(intr_holdoff, uint, NULL, 0644);
357 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
358 "0..4 in microseconds");
360 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
362 module_param_array(intr_cnt, uint, NULL, 0644);
363 MODULE_PARM_DESC(intr_cnt,
364 "thresholds 1..3 for queue interrupt packet counters");
367 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
368 * offset by 2 bytes in order to have the IP headers line up on 4-byte
369 * boundaries. This is a requirement for many architectures which will throw
370 * a machine check fault if an attempt is made to access one of the 4-byte IP
371 * header fields on a non-4-byte boundary. And it's a major performance issue
372 * even on some architectures which allow it like some implementations of the
373 * x86 ISA. However, some architectures don't mind this and for some very
374 * edge-case performance sensitive applications (like forwarding large volumes
375 * of small packets), setting this DMA offset to 0 will decrease the number of
376 * PCI-E Bus transfers enough to measurably affect performance.
378 static int rx_dma_offset = 2;
382 #ifdef CONFIG_PCI_IOV
383 module_param(vf_acls, bool, 0644);
384 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
386 /* Configure the number of PCI-E Virtual Function which are to be instantiated
387 * on SR-IOV Capable Physical Functions.
389 static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
391 module_param_array(num_vf, uint, NULL, 0644);
392 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
395 /* TX Queue select used to determine what algorithm to use for selecting TX
396 * queue. Select between the kernel provided function (select_queue=0) or user
397 * cxgb_select_queue function (select_queue=1)
399 * Default: select_queue=0
401 static int select_queue;
402 module_param(select_queue, int, 0644);
403 MODULE_PARM_DESC(select_queue,
404 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
407 * The filter TCAM has a fixed portion and a variable portion. The fixed
408 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
409 * ports. The variable portion is 36 bits which can include things like Exact
410 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
411 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
412 * far exceed the 36-bit budget for this "compressed" header portion of the
413 * filter. Thus, we have a scarce resource which must be carefully managed.
415 * By default we set this up to mostly match the set of filter matching
416 * capabilities of T3 but with accommodations for some of T4's more
417 * interesting features:
419 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
420 * [Inner] VLAN (17), Port (3), FCoE (1) }
423 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
424 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
425 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
428 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
430 module_param(tp_vlan_pri_map, uint, 0644);
431 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
433 static struct dentry *cxgb4_debugfs_root;
435 static LIST_HEAD(adapter_list);
436 static DEFINE_MUTEX(uld_mutex);
437 /* Adapter list to be accessed from atomic context */
438 static LIST_HEAD(adap_rcu_list);
439 static DEFINE_SPINLOCK(adap_rcu_lock);
440 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
441 static const char *uld_str[] = { "RDMA", "iSCSI" };
443 static void link_report(struct net_device *dev)
445 if (!netif_carrier_ok(dev))
446 netdev_info(dev, "link down\n");
448 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
450 const char *s = "10Mbps";
451 const struct port_info *p = netdev_priv(dev);
453 switch (p->link_cfg.speed) {
468 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
473 #ifdef CONFIG_CHELSIO_T4_DCB
474 /* Set up/tear down Data Center Bridging Priority mapping for a net device. */
475 static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
477 struct port_info *pi = netdev_priv(dev);
478 struct adapter *adap = pi->adapter;
479 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
482 /* We use a simple mapping of Port TX Queue Index to DCB
483 * Priority when we're enabling DCB.
485 for (i = 0; i < pi->nqsets; i++, txq++) {
489 name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
490 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
491 FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
492 value = enable ? i : 0xffffffff;
494 /* Since we can be called while atomic (from "interrupt
495 * level") we need to issue the Set Parameters Commannd
496 * without sleeping (timeout < 0).
498 err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
502 dev_err(adap->pdev_dev,
503 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
504 enable ? "set" : "unset", pi->port_id, i, -err);
507 #endif /* CONFIG_CHELSIO_T4_DCB */
509 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
511 struct net_device *dev = adapter->port[port_id];
513 /* Skip changes from disabled ports. */
514 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
516 netif_carrier_on(dev);
518 #ifdef CONFIG_CHELSIO_T4_DCB
519 cxgb4_dcb_state_init(dev);
520 dcb_tx_queue_prio_enable(dev, false);
521 #endif /* CONFIG_CHELSIO_T4_DCB */
522 netif_carrier_off(dev);
529 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
531 static const char *mod_str[] = {
532 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
535 const struct net_device *dev = adap->port[port_id];
536 const struct port_info *pi = netdev_priv(dev);
538 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
539 netdev_info(dev, "port module unplugged\n");
540 else if (pi->mod_type < ARRAY_SIZE(mod_str))
541 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
545 * Configure the exact and hash address filters to handle a port's multicast
546 * and secondary unicast MAC addresses.
548 static int set_addr_filters(const struct net_device *dev, bool sleep)
556 const struct netdev_hw_addr *ha;
557 int uc_cnt = netdev_uc_count(dev);
558 int mc_cnt = netdev_mc_count(dev);
559 const struct port_info *pi = netdev_priv(dev);
560 unsigned int mb = pi->adapter->fn;
562 /* first do the secondary unicast addresses */
563 netdev_for_each_uc_addr(ha, dev) {
564 addr[naddr++] = ha->addr;
565 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
566 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
567 naddr, addr, filt_idx, &uhash, sleep);
576 /* next set up the multicast addresses */
577 netdev_for_each_mc_addr(ha, dev) {
578 addr[naddr++] = ha->addr;
579 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
580 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
581 naddr, addr, filt_idx, &mhash, sleep);
590 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
591 uhash | mhash, sleep);
594 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
595 module_param(dbfifo_int_thresh, int, 0644);
596 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
599 * usecs to sleep while draining the dbfifo
601 static int dbfifo_drain_delay = 1000;
602 module_param(dbfifo_drain_delay, int, 0644);
603 MODULE_PARM_DESC(dbfifo_drain_delay,
604 "usecs to sleep while draining the dbfifo");
607 * Set Rx properties of a port, such as promiscruity, address filters, and MTU.
608 * If @mtu is -1 it is left unchanged.
610 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
613 struct port_info *pi = netdev_priv(dev);
615 ret = set_addr_filters(dev, sleep_ok);
617 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
618 (dev->flags & IFF_PROMISC) ? 1 : 0,
619 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
624 static struct workqueue_struct *workq;
627 * link_start - enable a port
628 * @dev: the port to enable
630 * Performs the MAC and PHY actions needed to enable a port.
632 static int link_start(struct net_device *dev)
635 struct port_info *pi = netdev_priv(dev);
636 unsigned int mb = pi->adapter->fn;
639 * We do not set address filters and promiscuity here, the stack does
640 * that step explicitly.
642 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
643 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
645 ret = t4_change_mac(pi->adapter, mb, pi->viid,
646 pi->xact_addr_filt, dev->dev_addr, true,
649 pi->xact_addr_filt = ret;
654 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
657 ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
658 true, CXGB4_DCB_ENABLED);
663 int cxgb4_dcb_enabled(const struct net_device *dev)
665 #ifdef CONFIG_CHELSIO_T4_DCB
666 struct port_info *pi = netdev_priv(dev);
668 return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
673 EXPORT_SYMBOL(cxgb4_dcb_enabled);
675 #ifdef CONFIG_CHELSIO_T4_DCB
676 /* Handle a Data Center Bridging update message from the firmware. */
677 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
679 int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
680 struct net_device *dev = adap->port[port];
681 int old_dcb_enabled = cxgb4_dcb_enabled(dev);
684 cxgb4_dcb_handle_fw_update(adap, pcmd);
685 new_dcb_enabled = cxgb4_dcb_enabled(dev);
687 /* If the DCB has become enabled or disabled on the port then we're
688 * going to need to set up/tear down DCB Priority parameters for the
689 * TX Queues associated with the port.
691 if (new_dcb_enabled != old_dcb_enabled)
692 dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
694 #endif /* CONFIG_CHELSIO_T4_DCB */
696 /* Clear a filter and release any of its resources that we own. This also
697 * clears the filter's "pending" status.
699 static void clear_filter(struct adapter *adap, struct filter_entry *f)
701 /* If the new or old filter have loopback rewriteing rules then we'll
702 * need to free any existing Layer Two Table (L2T) entries of the old
703 * filter rule. The firmware will handle freeing up any Source MAC
704 * Table (SMT) entries used for rewriting Source MAC Addresses in
708 cxgb4_l2t_release(f->l2t);
710 /* The zeroing of the filter rule below clears the filter valid,
711 * pending, locked flags, l2t pointer, etc. so it's all we need for
714 memset(f, 0, sizeof(*f));
717 /* Handle a filter write/deletion reply.
719 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
721 unsigned int idx = GET_TID(rpl);
722 unsigned int nidx = idx - adap->tids.ftid_base;
724 struct filter_entry *f;
726 if (idx >= adap->tids.ftid_base && nidx <
727 (adap->tids.nftids + adap->tids.nsftids)) {
729 ret = GET_TCB_COOKIE(rpl->cookie);
730 f = &adap->tids.ftid_tab[idx];
732 if (ret == FW_FILTER_WR_FLT_DELETED) {
733 /* Clear the filter when we get confirmation from the
734 * hardware that the filter has been deleted.
736 clear_filter(adap, f);
737 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
738 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
740 clear_filter(adap, f);
741 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
742 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
743 f->pending = 0; /* asynchronous setup completed */
746 /* Something went wrong. Issue a warning about the
747 * problem and clear everything out.
749 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
751 clear_filter(adap, f);
756 /* Response queue handler for the FW event queue.
758 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
759 const struct pkt_gl *gl)
761 u8 opcode = ((const struct rss_header *)rsp)->opcode;
763 rsp++; /* skip RSS header */
765 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
767 if (unlikely(opcode == CPL_FW4_MSG &&
768 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
770 opcode = ((const struct rss_header *)rsp)->opcode;
772 if (opcode != CPL_SGE_EGR_UPDATE) {
773 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
779 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
780 const struct cpl_sge_egr_update *p = (void *)rsp;
781 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
784 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
786 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
787 struct sge_eth_txq *eq;
789 eq = container_of(txq, struct sge_eth_txq, q);
790 netif_tx_wake_queue(eq->txq);
792 struct sge_ofld_txq *oq;
794 oq = container_of(txq, struct sge_ofld_txq, q);
795 tasklet_schedule(&oq->qresume_tsk);
797 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
798 const struct cpl_fw6_msg *p = (void *)rsp;
800 #ifdef CONFIG_CHELSIO_T4_DCB
801 const struct fw_port_cmd *pcmd = (const void *)p->data;
802 unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
803 unsigned int action =
804 FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));
806 if (cmd == FW_PORT_CMD &&
807 action == FW_PORT_ACTION_GET_PORT_INFO) {
808 int port = FW_PORT_CMD_PORTID_GET(
809 be32_to_cpu(pcmd->op_to_portid));
810 struct net_device *dev = q->adap->port[port];
811 int state_input = ((pcmd->u.info.dcbxdis_pkd &
813 ? CXGB4_DCB_INPUT_FW_DISABLED
814 : CXGB4_DCB_INPUT_FW_ENABLED);
816 cxgb4_dcb_state_fsm(dev, state_input);
819 if (cmd == FW_PORT_CMD &&
820 action == FW_PORT_ACTION_L2_DCB_CFG)
821 dcb_rpl(q->adap, pcmd);
825 t4_handle_fw_rpl(q->adap, p->data);
826 } else if (opcode == CPL_L2T_WRITE_RPL) {
827 const struct cpl_l2t_write_rpl *p = (void *)rsp;
829 do_l2t_write_rpl(q->adap, p);
830 } else if (opcode == CPL_SET_TCB_RPL) {
831 const struct cpl_set_tcb_rpl *p = (void *)rsp;
833 filter_rpl(q->adap, p);
835 dev_err(q->adap->pdev_dev,
836 "unexpected CPL %#x on FW event queue\n", opcode);
842 * uldrx_handler - response queue handler for ULD queues
843 * @q: the response queue that received the packet
844 * @rsp: the response queue descriptor holding the offload message
845 * @gl: the gather list of packet fragments
847 * Deliver an ingress offload packet to a ULD. All processing is done by
848 * the ULD, we just maintain statistics.
850 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
851 const struct pkt_gl *gl)
853 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
855 /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
857 if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
858 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
861 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
867 else if (gl == CXGB4_MSG_AN)
874 static void disable_msi(struct adapter *adapter)
876 if (adapter->flags & USING_MSIX) {
877 pci_disable_msix(adapter->pdev);
878 adapter->flags &= ~USING_MSIX;
879 } else if (adapter->flags & USING_MSI) {
880 pci_disable_msi(adapter->pdev);
881 adapter->flags &= ~USING_MSI;
886 * Interrupt handler for non-data events used with MSI-X.
888 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
890 struct adapter *adap = cookie;
892 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
895 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
897 t4_slow_intr_handler(adap);
902 * Name the MSI-X interrupts.
904 static void name_msix_vecs(struct adapter *adap)
906 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
908 /* non-data interrupts */
909 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
912 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
913 adap->port[0]->name);
915 /* Ethernet queues */
916 for_each_port(adap, j) {
917 struct net_device *d = adap->port[j];
918 const struct port_info *pi = netdev_priv(d);
920 for (i = 0; i < pi->nqsets; i++, msi_idx++)
921 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
926 for_each_ofldrxq(&adap->sge, i)
927 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
928 adap->port[0]->name, i);
930 for_each_rdmarxq(&adap->sge, i)
931 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
932 adap->port[0]->name, i);
934 for_each_rdmaciq(&adap->sge, i)
935 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
936 adap->port[0]->name, i);
939 static int request_msix_queue_irqs(struct adapter *adap)
941 struct sge *s = &adap->sge;
942 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
945 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
946 adap->msix_info[1].desc, &s->fw_evtq);
950 for_each_ethrxq(s, ethqidx) {
951 err = request_irq(adap->msix_info[msi_index].vec,
953 adap->msix_info[msi_index].desc,
954 &s->ethrxq[ethqidx].rspq);
959 for_each_ofldrxq(s, ofldqidx) {
960 err = request_irq(adap->msix_info[msi_index].vec,
962 adap->msix_info[msi_index].desc,
963 &s->ofldrxq[ofldqidx].rspq);
968 for_each_rdmarxq(s, rdmaqidx) {
969 err = request_irq(adap->msix_info[msi_index].vec,
971 adap->msix_info[msi_index].desc,
972 &s->rdmarxq[rdmaqidx].rspq);
977 for_each_rdmaciq(s, rdmaciqqidx) {
978 err = request_irq(adap->msix_info[msi_index].vec,
980 adap->msix_info[msi_index].desc,
981 &s->rdmaciq[rdmaciqqidx].rspq);
989 while (--rdmaciqqidx >= 0)
990 free_irq(adap->msix_info[--msi_index].vec,
991 &s->rdmaciq[rdmaciqqidx].rspq);
992 while (--rdmaqidx >= 0)
993 free_irq(adap->msix_info[--msi_index].vec,
994 &s->rdmarxq[rdmaqidx].rspq);
995 while (--ofldqidx >= 0)
996 free_irq(adap->msix_info[--msi_index].vec,
997 &s->ofldrxq[ofldqidx].rspq);
998 while (--ethqidx >= 0)
999 free_irq(adap->msix_info[--msi_index].vec,
1000 &s->ethrxq[ethqidx].rspq);
1001 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
1005 static void free_msix_queue_irqs(struct adapter *adap)
1007 int i, msi_index = 2;
1008 struct sge *s = &adap->sge;
1010 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
1011 for_each_ethrxq(s, i)
1012 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
1013 for_each_ofldrxq(s, i)
1014 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
1015 for_each_rdmarxq(s, i)
1016 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
1017 for_each_rdmaciq(s, i)
1018 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
1022 * write_rss - write the RSS table for a given port
1024 * @queues: array of queue indices for RSS
1026 * Sets up the portion of the HW RSS table for the port's VI to distribute
1027 * packets to the Rx queues in @queues.
1029 static int write_rss(const struct port_info *pi, const u16 *queues)
1033 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
1035 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
1039 /* map the queue indices to queue ids */
1040 for (i = 0; i < pi->rss_size; i++, queues++)
1041 rss[i] = q[*queues].rspq.abs_id;
1043 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
1044 pi->rss_size, rss, pi->rss_size);
1050 * setup_rss - configure RSS
1051 * @adap: the adapter
1053 * Sets up RSS for each port.
1055 static int setup_rss(struct adapter *adap)
1059 for_each_port(adap, i) {
1060 const struct port_info *pi = adap2pinfo(adap, i);
1062 err = write_rss(pi, pi->rss);
1070 * Return the channel of the ingress queue with the given qid.
1072 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
1074 qid -= p->ingr_start;
1075 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
1079 * Wait until all NAPI handlers are descheduled.
1081 static void quiesce_rx(struct adapter *adap)
1085 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1086 struct sge_rspq *q = adap->sge.ingr_map[i];
1088 if (q && q->handler)
1089 napi_disable(&q->napi);
1094 * Enable NAPI scheduling and interrupt generation for all Rx queues.
1096 static void enable_rx(struct adapter *adap)
1100 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1101 struct sge_rspq *q = adap->sge.ingr_map[i];
1106 napi_enable(&q->napi);
1107 /* 0-increment GTS to start the timer and enable interrupts */
1108 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
1109 SEINTARM(q->intr_params) |
1110 INGRESSQID(q->cntxt_id));
1115 * setup_sge_queues - configure SGE Tx/Rx/response queues
1116 * @adap: the adapter
1118 * Determines how many sets of SGE queues to use and initializes them.
1119 * We support multiple queue sets per port if we have MSI-X, otherwise
1120 * just one queue set per port.
1122 static int setup_sge_queues(struct adapter *adap)
1124 int err, msi_idx, i, j;
1125 struct sge *s = &adap->sge;
1127 bitmap_zero(s->starving_fl, MAX_EGRQ);
1128 bitmap_zero(s->txq_maperr, MAX_EGRQ);
1130 if (adap->flags & USING_MSIX)
1131 msi_idx = 1; /* vector 0 is for non-queue interrupts */
1133 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1137 msi_idx = -((int)s->intrq.abs_id + 1);
1140 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1141 msi_idx, NULL, fwevtq_handler);
1143 freeout: t4_free_sge_resources(adap);
1147 for_each_port(adap, i) {
1148 struct net_device *dev = adap->port[i];
1149 struct port_info *pi = netdev_priv(dev);
1150 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1151 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1153 for (j = 0; j < pi->nqsets; j++, q++) {
1156 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1162 memset(&q->stats, 0, sizeof(q->stats));
1164 for (j = 0; j < pi->nqsets; j++, t++) {
1165 err = t4_sge_alloc_eth_txq(adap, t, dev,
1166 netdev_get_tx_queue(dev, j),
1167 s->fw_evtq.cntxt_id);
1173 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
1174 for_each_ofldrxq(s, i) {
1175 struct sge_ofld_rxq *q = &s->ofldrxq[i];
1176 struct net_device *dev = adap->port[i / j];
1180 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1181 q->fl.size ? &q->fl : NULL,
1185 memset(&q->stats, 0, sizeof(q->stats));
1186 s->ofld_rxq[i] = q->rspq.abs_id;
1187 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1188 s->fw_evtq.cntxt_id);
1193 for_each_rdmarxq(s, i) {
1194 struct sge_ofld_rxq *q = &s->rdmarxq[i];
1198 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1199 msi_idx, q->fl.size ? &q->fl : NULL,
1203 memset(&q->stats, 0, sizeof(q->stats));
1204 s->rdma_rxq[i] = q->rspq.abs_id;
1207 for_each_rdmaciq(s, i) {
1208 struct sge_ofld_rxq *q = &s->rdmaciq[i];
1212 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1213 msi_idx, q->fl.size ? &q->fl : NULL,
1217 memset(&q->stats, 0, sizeof(q->stats));
1218 s->rdma_ciq[i] = q->rspq.abs_id;
1221 for_each_port(adap, i) {
1223 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1224 * have RDMA queues, and that's the right value.
1226 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1227 s->fw_evtq.cntxt_id,
1228 s->rdmarxq[i].rspq.cntxt_id);
1233 t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
1234 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1235 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1240 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1241 * The allocated memory is cleared.
1243 void *t4_alloc_mem(size_t size)
1245 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1253 * Free memory allocated through alloc_mem().
1255 static void t4_free_mem(void *addr)
1257 if (is_vmalloc_addr(addr))
1263 /* Send a Work Request to write the filter at a specified index. We construct
1264 * a Firmware Filter Work Request to have the work done and put the indicated
1265 * filter into "pending" mode which will prevent any further actions against
1266 * it till we get a reply from the firmware on the completion status of the
1269 static int set_filter_wr(struct adapter *adapter, int fidx)
1271 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1272 struct sk_buff *skb;
1273 struct fw_filter_wr *fwr;
1276 /* If the new filter requires loopback Destination MAC and/or VLAN
1277 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1280 if (f->fs.newdmac || f->fs.newvlan) {
1281 /* allocate L2T entry for new filter */
1282 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1285 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1286 f->fs.eport, f->fs.dmac)) {
1287 cxgb4_l2t_release(f->l2t);
1293 ftid = adapter->tids.ftid_base + fidx;
1295 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1296 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1297 memset(fwr, 0, sizeof(*fwr));
1299 /* It would be nice to put most of the following in t4_hw.c but most
1300 * of the work is translating the cxgbtool ch_filter_specification
1301 * into the Work Request and the definition of that structure is
1302 * currently in cxgbtool.h which isn't appropriate to pull into the
1303 * common code. We may eventually try to come up with a more neutral
1304 * filter specification structure but for now it's easiest to simply
1305 * put this fairly direct code in line ...
1307 fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1308 fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1310 htonl(V_FW_FILTER_WR_TID(ftid) |
1311 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1312 V_FW_FILTER_WR_NOREPLY(0) |
1313 V_FW_FILTER_WR_IQ(f->fs.iq));
1314 fwr->del_filter_to_l2tix =
1315 htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1316 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1317 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1318 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1319 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1320 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1321 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1322 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1323 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1324 f->fs.newvlan == VLAN_REWRITE) |
1325 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1326 f->fs.newvlan == VLAN_REWRITE) |
1327 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1328 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1329 V_FW_FILTER_WR_PRIO(f->fs.prio) |
1330 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1331 fwr->ethtype = htons(f->fs.val.ethtype);
1332 fwr->ethtypem = htons(f->fs.mask.ethtype);
1333 fwr->frag_to_ovlan_vldm =
1334 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1335 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1336 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1337 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1338 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1339 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1341 fwr->rx_chan_rx_rpl_iq =
1342 htons(V_FW_FILTER_WR_RX_CHAN(0) |
1343 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1344 fwr->maci_to_matchtypem =
1345 htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1346 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1347 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1348 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1349 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1350 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1351 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1352 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1353 fwr->ptcl = f->fs.val.proto;
1354 fwr->ptclm = f->fs.mask.proto;
1355 fwr->ttyp = f->fs.val.tos;
1356 fwr->ttypm = f->fs.mask.tos;
1357 fwr->ivlan = htons(f->fs.val.ivlan);
1358 fwr->ivlanm = htons(f->fs.mask.ivlan);
1359 fwr->ovlan = htons(f->fs.val.ovlan);
1360 fwr->ovlanm = htons(f->fs.mask.ovlan);
1361 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1362 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1363 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1364 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1365 fwr->lp = htons(f->fs.val.lport);
1366 fwr->lpm = htons(f->fs.mask.lport);
1367 fwr->fp = htons(f->fs.val.fport);
1368 fwr->fpm = htons(f->fs.mask.fport);
1370 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1372 /* Mark the filter as "pending" and ship off the Filter Work Request.
1373 * When we get the Work Request Reply we'll clear the pending status.
1376 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1377 t4_ofld_send(adapter, skb);
1381 /* Delete the filter at a specified index.
1383 static int del_filter_wr(struct adapter *adapter, int fidx)
1385 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1386 struct sk_buff *skb;
1387 struct fw_filter_wr *fwr;
1388 unsigned int len, ftid;
1391 ftid = adapter->tids.ftid_base + fidx;
1393 skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1394 fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1395 t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1397 /* Mark the filter as "pending" and ship off the Filter Work Request.
1398 * When we get the Work Request Reply we'll clear the pending status.
1401 t4_mgmt_tx(adapter, skb);
1405 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1406 void *accel_priv, select_queue_fallback_t fallback)
1410 #ifdef CONFIG_CHELSIO_T4_DCB
1411 /* If a Data Center Bridging has been successfully negotiated on this
1412 * link then we'll use the skb's priority to map it to a TX Queue.
1413 * The skb's priority is determined via the VLAN Tag Priority Code
1416 if (cxgb4_dcb_enabled(dev)) {
1420 err = vlan_get_tag(skb, &vlan_tci);
1421 if (unlikely(err)) {
1422 if (net_ratelimit())
1424 "TX Packet without VLAN Tag on DCB Link\n");
1427 txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1431 #endif /* CONFIG_CHELSIO_T4_DCB */
1434 txq = (skb_rx_queue_recorded(skb)
1435 ? skb_get_rx_queue(skb)
1436 : smp_processor_id());
1438 while (unlikely(txq >= dev->real_num_tx_queues))
1439 txq -= dev->real_num_tx_queues;
1444 return fallback(dev, skb) % dev->real_num_tx_queues;
1447 static inline int is_offload(const struct adapter *adap)
1449 return adap->params.offload;
1453 * Implementation of ethtool operations.
1456 static u32 get_msglevel(struct net_device *dev)
1458 return netdev2adap(dev)->msg_enable;
1461 static void set_msglevel(struct net_device *dev, u32 val)
1463 netdev2adap(dev)->msg_enable = val;
1466 static char stats_strings[][ETH_GSTRING_LEN] = {
1469 "TxBroadcastFrames ",
1470 "TxMulticastFrames ",
1476 "TxFrames128To255 ",
1477 "TxFrames256To511 ",
1478 "TxFrames512To1023 ",
1479 "TxFrames1024To1518 ",
1480 "TxFrames1519ToMax ",
1495 "RxBroadcastFrames ",
1496 "RxMulticastFrames ",
1508 "RxFrames128To255 ",
1509 "RxFrames256To511 ",
1510 "RxFrames512To1023 ",
1511 "RxFrames1024To1518 ",
1512 "RxFrames1519ToMax ",
1524 "RxBG0FramesDropped ",
1525 "RxBG1FramesDropped ",
1526 "RxBG2FramesDropped ",
1527 "RxBG3FramesDropped ",
1528 "RxBG0FramesTrunc ",
1529 "RxBG1FramesTrunc ",
1530 "RxBG2FramesTrunc ",
1531 "RxBG3FramesTrunc ",
1540 "WriteCoalSuccess ",
1544 static int get_sset_count(struct net_device *dev, int sset)
1548 return ARRAY_SIZE(stats_strings);
1554 #define T4_REGMAP_SIZE (160 * 1024)
1555 #define T5_REGMAP_SIZE (332 * 1024)
1557 static int get_regs_len(struct net_device *dev)
1559 struct adapter *adap = netdev2adap(dev);
1560 if (is_t4(adap->params.chip))
1561 return T4_REGMAP_SIZE;
1563 return T5_REGMAP_SIZE;
1566 static int get_eeprom_len(struct net_device *dev)
1571 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1573 struct adapter *adapter = netdev2adap(dev);
1575 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1576 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1577 strlcpy(info->bus_info, pci_name(adapter->pdev),
1578 sizeof(info->bus_info));
1580 if (adapter->params.fw_vers)
1581 snprintf(info->fw_version, sizeof(info->fw_version),
1582 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1583 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1584 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1585 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1586 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1587 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1588 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1589 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1590 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1593 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1595 if (stringset == ETH_SS_STATS)
1596 memcpy(data, stats_strings, sizeof(stats_strings));
1600 * port stats maintained per queue of the port. They should be in the same
1601 * order as in stats_strings above.
1603 struct queue_port_stats {
1613 static void collect_sge_port_stats(const struct adapter *adap,
1614 const struct port_info *p, struct queue_port_stats *s)
1617 const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1618 const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1620 memset(s, 0, sizeof(*s));
1621 for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1623 s->tx_csum += tx->tx_cso;
1624 s->rx_csum += rx->stats.rx_cso;
1625 s->vlan_ex += rx->stats.vlan_ex;
1626 s->vlan_ins += tx->vlan_ins;
1627 s->gro_pkts += rx->stats.lro_pkts;
1628 s->gro_merged += rx->stats.lro_merged;
1632 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1635 struct port_info *pi = netdev_priv(dev);
1636 struct adapter *adapter = pi->adapter;
1639 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1641 data += sizeof(struct port_stats) / sizeof(u64);
1642 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1643 data += sizeof(struct queue_port_stats) / sizeof(u64);
1644 if (!is_t4(adapter->params.chip)) {
1645 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1646 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1647 val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1648 *data = val1 - val2;
1653 memset(data, 0, 2 * sizeof(u64));
1659 * Return a version number to identify the type of adapter. The scheme is:
1660 * - bits 0..9: chip version
1661 * - bits 10..15: chip revision
1662 * - bits 16..23: register dump version
1664 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1666 return CHELSIO_CHIP_VERSION(ap->params.chip) |
1667 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1670 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1673 u32 *p = buf + start;
1675 for ( ; start <= end; start += sizeof(u32))
1676 *p++ = t4_read_reg(ap, start);
1679 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1682 static const unsigned int t4_reg_ranges[] = {
1902 static const unsigned int t5_reg_ranges[] = {
2330 struct adapter *ap = netdev2adap(dev);
2331 static const unsigned int *reg_ranges;
2332 int arr_size = 0, buf_size = 0;
2334 if (is_t4(ap->params.chip)) {
2335 reg_ranges = &t4_reg_ranges[0];
2336 arr_size = ARRAY_SIZE(t4_reg_ranges);
2337 buf_size = T4_REGMAP_SIZE;
2339 reg_ranges = &t5_reg_ranges[0];
2340 arr_size = ARRAY_SIZE(t5_reg_ranges);
2341 buf_size = T5_REGMAP_SIZE;
2344 regs->version = mk_adap_vers(ap);
2346 memset(buf, 0, buf_size);
2347 for (i = 0; i < arr_size; i += 2)
2348 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2351 static int restart_autoneg(struct net_device *dev)
2353 struct port_info *p = netdev_priv(dev);
2355 if (!netif_running(dev))
2357 if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2359 t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2363 static int identify_port(struct net_device *dev,
2364 enum ethtool_phys_id_state state)
2367 struct adapter *adap = netdev2adap(dev);
2369 if (state == ETHTOOL_ID_ACTIVE)
2371 else if (state == ETHTOOL_ID_INACTIVE)
2376 return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2379 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2383 if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2384 type == FW_PORT_TYPE_BT_XAUI) {
2386 if (caps & FW_PORT_CAP_SPEED_100M)
2387 v |= SUPPORTED_100baseT_Full;
2388 if (caps & FW_PORT_CAP_SPEED_1G)
2389 v |= SUPPORTED_1000baseT_Full;
2390 if (caps & FW_PORT_CAP_SPEED_10G)
2391 v |= SUPPORTED_10000baseT_Full;
2392 } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2393 v |= SUPPORTED_Backplane;
2394 if (caps & FW_PORT_CAP_SPEED_1G)
2395 v |= SUPPORTED_1000baseKX_Full;
2396 if (caps & FW_PORT_CAP_SPEED_10G)
2397 v |= SUPPORTED_10000baseKX4_Full;
2398 } else if (type == FW_PORT_TYPE_KR)
2399 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2400 else if (type == FW_PORT_TYPE_BP_AP)
2401 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2402 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2403 else if (type == FW_PORT_TYPE_BP4_AP)
2404 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2405 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2406 SUPPORTED_10000baseKX4_Full;
2407 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2408 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
2409 v |= SUPPORTED_FIBRE;
2410 else if (type == FW_PORT_TYPE_BP40_BA)
2411 v |= SUPPORTED_40000baseSR4_Full;
2413 if (caps & FW_PORT_CAP_ANEG)
2414 v |= SUPPORTED_Autoneg;
2418 static unsigned int to_fw_linkcaps(unsigned int caps)
2422 if (caps & ADVERTISED_100baseT_Full)
2423 v |= FW_PORT_CAP_SPEED_100M;
2424 if (caps & ADVERTISED_1000baseT_Full)
2425 v |= FW_PORT_CAP_SPEED_1G;
2426 if (caps & ADVERTISED_10000baseT_Full)
2427 v |= FW_PORT_CAP_SPEED_10G;
2428 if (caps & ADVERTISED_40000baseSR4_Full)
2429 v |= FW_PORT_CAP_SPEED_40G;
2433 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2435 const struct port_info *p = netdev_priv(dev);
2437 if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2438 p->port_type == FW_PORT_TYPE_BT_XFI ||
2439 p->port_type == FW_PORT_TYPE_BT_XAUI)
2440 cmd->port = PORT_TP;
2441 else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2442 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2443 cmd->port = PORT_FIBRE;
2444 else if (p->port_type == FW_PORT_TYPE_SFP ||
2445 p->port_type == FW_PORT_TYPE_QSFP_10G ||
2446 p->port_type == FW_PORT_TYPE_QSFP) {
2447 if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2448 p->mod_type == FW_PORT_MOD_TYPE_SR ||
2449 p->mod_type == FW_PORT_MOD_TYPE_ER ||
2450 p->mod_type == FW_PORT_MOD_TYPE_LRM)
2451 cmd->port = PORT_FIBRE;
2452 else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2453 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2454 cmd->port = PORT_DA;
2456 cmd->port = PORT_OTHER;
2458 cmd->port = PORT_OTHER;
2460 if (p->mdio_addr >= 0) {
2461 cmd->phy_address = p->mdio_addr;
2462 cmd->transceiver = XCVR_EXTERNAL;
2463 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2464 MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2466 cmd->phy_address = 0; /* not really, but no better option */
2467 cmd->transceiver = XCVR_INTERNAL;
2468 cmd->mdio_support = 0;
2471 cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2472 cmd->advertising = from_fw_linkcaps(p->port_type,
2473 p->link_cfg.advertising);
2474 ethtool_cmd_speed_set(cmd,
2475 netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2476 cmd->duplex = DUPLEX_FULL;
2477 cmd->autoneg = p->link_cfg.autoneg;
2483 static unsigned int speed_to_caps(int speed)
2486 return FW_PORT_CAP_SPEED_100M;
2488 return FW_PORT_CAP_SPEED_1G;
2490 return FW_PORT_CAP_SPEED_10G;
2492 return FW_PORT_CAP_SPEED_40G;
2496 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2499 struct port_info *p = netdev_priv(dev);
2500 struct link_config *lc = &p->link_cfg;
2501 u32 speed = ethtool_cmd_speed(cmd);
2503 if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2506 if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2508 * PHY offers a single speed. See if that's what's
2511 if (cmd->autoneg == AUTONEG_DISABLE &&
2512 (lc->supported & speed_to_caps(speed)))
2517 if (cmd->autoneg == AUTONEG_DISABLE) {
2518 cap = speed_to_caps(speed);
2520 if (!(lc->supported & cap) ||
2525 lc->requested_speed = cap;
2526 lc->advertising = 0;
2528 cap = to_fw_linkcaps(cmd->advertising);
2529 if (!(lc->supported & cap))
2531 lc->requested_speed = 0;
2532 lc->advertising = cap | FW_PORT_CAP_ANEG;
2534 lc->autoneg = cmd->autoneg;
2536 if (netif_running(dev))
2537 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2542 static void get_pauseparam(struct net_device *dev,
2543 struct ethtool_pauseparam *epause)
2545 struct port_info *p = netdev_priv(dev);
2547 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2548 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2549 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2552 static int set_pauseparam(struct net_device *dev,
2553 struct ethtool_pauseparam *epause)
2555 struct port_info *p = netdev_priv(dev);
2556 struct link_config *lc = &p->link_cfg;
2558 if (epause->autoneg == AUTONEG_DISABLE)
2559 lc->requested_fc = 0;
2560 else if (lc->supported & FW_PORT_CAP_ANEG)
2561 lc->requested_fc = PAUSE_AUTONEG;
2565 if (epause->rx_pause)
2566 lc->requested_fc |= PAUSE_RX;
2567 if (epause->tx_pause)
2568 lc->requested_fc |= PAUSE_TX;
2569 if (netif_running(dev))
2570 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2575 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2577 const struct port_info *pi = netdev_priv(dev);
2578 const struct sge *s = &pi->adapter->sge;
2580 e->rx_max_pending = MAX_RX_BUFFERS;
2581 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2582 e->rx_jumbo_max_pending = 0;
2583 e->tx_max_pending = MAX_TXQ_ENTRIES;
2585 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2586 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2587 e->rx_jumbo_pending = 0;
2588 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2591 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2594 const struct port_info *pi = netdev_priv(dev);
2595 struct adapter *adapter = pi->adapter;
2596 struct sge *s = &adapter->sge;
2598 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2599 e->tx_pending > MAX_TXQ_ENTRIES ||
2600 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2601 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2602 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2605 if (adapter->flags & FULL_INIT_DONE)
2608 for (i = 0; i < pi->nqsets; ++i) {
2609 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2610 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2611 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2616 static int closest_timer(const struct sge *s, int time)
2618 int i, delta, match = 0, min_delta = INT_MAX;
2620 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2621 delta = time - s->timer_val[i];
2624 if (delta < min_delta) {
2632 static int closest_thres(const struct sge *s, int thres)
2634 int i, delta, match = 0, min_delta = INT_MAX;
2636 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2637 delta = thres - s->counter_val[i];
2640 if (delta < min_delta) {
2649 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2651 static unsigned int qtimer_val(const struct adapter *adap,
2652 const struct sge_rspq *q)
2654 unsigned int idx = q->intr_params >> 1;
2656 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2660 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
2662 * @us: the hold-off time in us, or 0 to disable timer
2663 * @cnt: the hold-off packet count, or 0 to disable counter
2665 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2666 * one of the two needs to be enabled for the queue to generate interrupts.
2668 static int set_rspq_intr_params(struct sge_rspq *q,
2669 unsigned int us, unsigned int cnt)
2671 struct adapter *adap = q->adap;
2673 if ((us | cnt) == 0)
2680 new_idx = closest_thres(&adap->sge, cnt);
2681 if (q->desc && q->pktcnt_idx != new_idx) {
2682 /* the queue has already been created, update it */
2683 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2684 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2685 FW_PARAMS_PARAM_YZ(q->cntxt_id);
2686 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2691 q->pktcnt_idx = new_idx;
2694 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2695 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2700 * set_rx_intr_params - set a net devices's RX interrupt holdoff paramete!
2701 * @dev: the network device
2702 * @us: the hold-off time in us, or 0 to disable timer
2703 * @cnt: the hold-off packet count, or 0 to disable counter
2705 * Set the RX interrupt hold-off parameters for a network device.
2707 static int set_rx_intr_params(struct net_device *dev,
2708 unsigned int us, unsigned int cnt)
2711 struct port_info *pi = netdev_priv(dev);
2712 struct adapter *adap = pi->adapter;
2713 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2715 for (i = 0; i < pi->nqsets; i++, q++) {
2716 err = set_rspq_intr_params(&q->rspq, us, cnt);
2723 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2725 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2726 c->rx_max_coalesced_frames);
2729 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2731 const struct port_info *pi = netdev_priv(dev);
2732 const struct adapter *adap = pi->adapter;
2733 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2735 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2736 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2737 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2742 * eeprom_ptov - translate a physical EEPROM address to virtual
2743 * @phys_addr: the physical EEPROM address
2744 * @fn: the PCI function number
2745 * @sz: size of function-specific area
2747 * Translate a physical EEPROM address to virtual. The first 1K is
2748 * accessed through virtual addresses starting at 31K, the rest is
2749 * accessed through virtual addresses starting at 0.
2751 * The mapping is as follows:
2752 * [0..1K) -> [31K..32K)
2753 * [1K..1K+A) -> [31K-A..31K)
2754 * [1K+A..ES) -> [0..ES-A-1K)
2756 * where A = @fn * @sz, and ES = EEPROM size.
2758 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2761 if (phys_addr < 1024)
2762 return phys_addr + (31 << 10);
2763 if (phys_addr < 1024 + fn)
2764 return 31744 - fn + phys_addr - 1024;
2765 if (phys_addr < EEPROMSIZE)
2766 return phys_addr - 1024 - fn;
2771 * The next two routines implement eeprom read/write from physical addresses.
2773 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2775 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2778 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2779 return vaddr < 0 ? vaddr : 0;
2782 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2784 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2787 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2788 return vaddr < 0 ? vaddr : 0;
2791 #define EEPROM_MAGIC 0x38E2F10C
2793 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2797 struct adapter *adapter = netdev2adap(dev);
2799 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2803 e->magic = EEPROM_MAGIC;
2804 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2805 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2808 memcpy(data, buf + e->offset, e->len);
2813 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2818 u32 aligned_offset, aligned_len, *p;
2819 struct adapter *adapter = netdev2adap(dev);
2821 if (eeprom->magic != EEPROM_MAGIC)
2824 aligned_offset = eeprom->offset & ~3;
2825 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2827 if (adapter->fn > 0) {
2828 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2830 if (aligned_offset < start ||
2831 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2835 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2837 * RMW possibly needed for first or last words.
2839 buf = kmalloc(aligned_len, GFP_KERNEL);
2842 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2843 if (!err && aligned_len > 4)
2844 err = eeprom_rd_phys(adapter,
2845 aligned_offset + aligned_len - 4,
2846 (u32 *)&buf[aligned_len - 4]);
2849 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2853 err = t4_seeprom_wp(adapter, false);
2857 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2858 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2859 aligned_offset += 4;
2863 err = t4_seeprom_wp(adapter, true);
2870 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2873 const struct firmware *fw;
2874 struct adapter *adap = netdev2adap(netdev);
2876 ef->data[sizeof(ef->data) - 1] = '\0';
2877 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2881 ret = t4_load_fw(adap, fw->data, fw->size);
2882 release_firmware(fw);
2884 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2888 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2889 #define BCAST_CRC 0xa0ccc1a6
2891 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2893 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2894 wol->wolopts = netdev2adap(dev)->wol;
2895 memset(&wol->sopass, 0, sizeof(wol->sopass));
2898 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2901 struct port_info *pi = netdev_priv(dev);
2903 if (wol->wolopts & ~WOL_SUPPORTED)
2905 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2906 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2907 if (wol->wolopts & WAKE_BCAST) {
2908 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2911 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2912 ~6ULL, ~0ULL, BCAST_CRC, true);
2914 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2918 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2920 const struct port_info *pi = netdev_priv(dev);
2921 netdev_features_t changed = dev->features ^ features;
2924 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2927 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2929 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2931 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2935 static u32 get_rss_table_size(struct net_device *dev)
2937 const struct port_info *pi = netdev_priv(dev);
2939 return pi->rss_size;
2942 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
2944 const struct port_info *pi = netdev_priv(dev);
2945 unsigned int n = pi->rss_size;
2952 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
2955 struct port_info *pi = netdev_priv(dev);
2957 for (i = 0; i < pi->rss_size; i++)
2959 if (pi->adapter->flags & FULL_INIT_DONE)
2960 return write_rss(pi, pi->rss);
2964 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2967 const struct port_info *pi = netdev_priv(dev);
2969 switch (info->cmd) {
2970 case ETHTOOL_GRXFH: {
2971 unsigned int v = pi->rss_mode;
2974 switch (info->flow_type) {
2976 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2977 info->data = RXH_IP_SRC | RXH_IP_DST |
2978 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2979 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2980 info->data = RXH_IP_SRC | RXH_IP_DST;
2983 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2984 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2985 info->data = RXH_IP_SRC | RXH_IP_DST |
2986 RXH_L4_B_0_1 | RXH_L4_B_2_3;
2987 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2988 info->data = RXH_IP_SRC | RXH_IP_DST;
2991 case AH_ESP_V4_FLOW:
2993 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2994 info->data = RXH_IP_SRC | RXH_IP_DST;
2997 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2998 info->data = RXH_IP_SRC | RXH_IP_DST |
2999 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3000 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3001 info->data = RXH_IP_SRC | RXH_IP_DST;
3004 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
3005 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3006 info->data = RXH_IP_SRC | RXH_IP_DST |
3007 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3008 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3009 info->data = RXH_IP_SRC | RXH_IP_DST;
3012 case AH_ESP_V6_FLOW:
3014 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3015 info->data = RXH_IP_SRC | RXH_IP_DST;
3020 case ETHTOOL_GRXRINGS:
3021 info->data = pi->nqsets;
3027 static const struct ethtool_ops cxgb_ethtool_ops = {
3028 .get_settings = get_settings,
3029 .set_settings = set_settings,
3030 .get_drvinfo = get_drvinfo,
3031 .get_msglevel = get_msglevel,
3032 .set_msglevel = set_msglevel,
3033 .get_ringparam = get_sge_param,
3034 .set_ringparam = set_sge_param,
3035 .get_coalesce = get_coalesce,
3036 .set_coalesce = set_coalesce,
3037 .get_eeprom_len = get_eeprom_len,
3038 .get_eeprom = get_eeprom,
3039 .set_eeprom = set_eeprom,
3040 .get_pauseparam = get_pauseparam,
3041 .set_pauseparam = set_pauseparam,
3042 .get_link = ethtool_op_get_link,
3043 .get_strings = get_strings,
3044 .set_phys_id = identify_port,
3045 .nway_reset = restart_autoneg,
3046 .get_sset_count = get_sset_count,
3047 .get_ethtool_stats = get_stats,
3048 .get_regs_len = get_regs_len,
3049 .get_regs = get_regs,
3052 .get_rxnfc = get_rxnfc,
3053 .get_rxfh_indir_size = get_rss_table_size,
3054 .get_rxfh = get_rss_table,
3055 .set_rxfh = set_rss_table,
3056 .flash_device = set_flash,
3062 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
3066 loff_t avail = file_inode(file)->i_size;
3067 unsigned int mem = (uintptr_t)file->private_data & 3;
3068 struct adapter *adap = file->private_data - mem;
3074 if (count > avail - pos)
3075 count = avail - pos;
3082 if ((mem == MEM_MC) || (mem == MEM_MC1))
3083 ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
3085 ret = t4_edc_read(adap, mem, pos, data, NULL);
3089 ofst = pos % sizeof(data);
3090 len = min(count, sizeof(data) - ofst);
3091 if (copy_to_user(buf, (u8 *)data + ofst, len))
3098 count = pos - *ppos;
3103 static const struct file_operations mem_debugfs_fops = {
3104 .owner = THIS_MODULE,
3105 .open = simple_open,
3107 .llseek = default_llseek,
3110 static void add_debugfs_mem(struct adapter *adap, const char *name,
3111 unsigned int idx, unsigned int size_mb)
3115 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
3116 (void *)adap + idx, &mem_debugfs_fops);
3117 if (de && de->d_inode)
3118 de->d_inode->i_size = size_mb << 20;
3121 static int setup_debugfs(struct adapter *adap)
3126 if (IS_ERR_OR_NULL(adap->debugfs_root))
3129 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
3130 if (i & EDRAM0_ENABLE) {
3131 size = t4_read_reg(adap, MA_EDRAM0_BAR);
3132 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
3134 if (i & EDRAM1_ENABLE) {
3135 size = t4_read_reg(adap, MA_EDRAM1_BAR);
3136 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
3138 if (is_t4(adap->params.chip)) {
3139 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3140 if (i & EXT_MEM_ENABLE)
3141 add_debugfs_mem(adap, "mc", MEM_MC,
3142 EXT_MEM_SIZE_GET(size));
3144 if (i & EXT_MEM_ENABLE) {
3145 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3146 add_debugfs_mem(adap, "mc0", MEM_MC0,
3147 EXT_MEM_SIZE_GET(size));
3149 if (i & EXT_MEM1_ENABLE) {
3150 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
3151 add_debugfs_mem(adap, "mc1", MEM_MC1,
3152 EXT_MEM_SIZE_GET(size));
3156 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
3162 * upper-layer driver support
3166 * Allocate an active-open TID and set it to the supplied value.
3168 int cxgb4_alloc_atid(struct tid_info *t, void *data)
3172 spin_lock_bh(&t->atid_lock);
3174 union aopen_entry *p = t->afree;
3176 atid = (p - t->atid_tab) + t->atid_base;
3181 spin_unlock_bh(&t->atid_lock);
3184 EXPORT_SYMBOL(cxgb4_alloc_atid);
3187 * Release an active-open TID.
3189 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3191 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3193 spin_lock_bh(&t->atid_lock);
3197 spin_unlock_bh(&t->atid_lock);
3199 EXPORT_SYMBOL(cxgb4_free_atid);
3202 * Allocate a server TID and set it to the supplied value.
3204 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3208 spin_lock_bh(&t->stid_lock);
3209 if (family == PF_INET) {
3210 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3211 if (stid < t->nstids)
3212 __set_bit(stid, t->stid_bmap);
3216 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3221 t->stid_tab[stid].data = data;
3222 stid += t->stid_base;
3223 /* IPv6 requires max of 520 bits or 16 cells in TCAM
3224 * This is equivalent to 4 TIDs. With CLIP enabled it
3227 if (family == PF_INET)
3230 t->stids_in_use += 4;
3232 spin_unlock_bh(&t->stid_lock);
3235 EXPORT_SYMBOL(cxgb4_alloc_stid);
3237 /* Allocate a server filter TID and set it to the supplied value.
3239 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3243 spin_lock_bh(&t->stid_lock);
3244 if (family == PF_INET) {
3245 stid = find_next_zero_bit(t->stid_bmap,
3246 t->nstids + t->nsftids, t->nstids);
3247 if (stid < (t->nstids + t->nsftids))
3248 __set_bit(stid, t->stid_bmap);
3255 t->stid_tab[stid].data = data;
3257 stid += t->sftid_base;
3260 spin_unlock_bh(&t->stid_lock);
3263 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3265 /* Release a server TID.
3267 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3269 /* Is it a server filter TID? */
3270 if (t->nsftids && (stid >= t->sftid_base)) {
3271 stid -= t->sftid_base;
3274 stid -= t->stid_base;
3277 spin_lock_bh(&t->stid_lock);
3278 if (family == PF_INET)
3279 __clear_bit(stid, t->stid_bmap);
3281 bitmap_release_region(t->stid_bmap, stid, 2);
3282 t->stid_tab[stid].data = NULL;
3283 if (family == PF_INET)
3286 t->stids_in_use -= 4;
3287 spin_unlock_bh(&t->stid_lock);
3289 EXPORT_SYMBOL(cxgb4_free_stid);
3292 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3294 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3297 struct cpl_tid_release *req;
3299 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3300 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3301 INIT_TP_WR(req, tid);
3302 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3306 * Queue a TID release request and if necessary schedule a work queue to
3309 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3312 void **p = &t->tid_tab[tid];
3313 struct adapter *adap = container_of(t, struct adapter, tids);
3315 spin_lock_bh(&adap->tid_release_lock);
3316 *p = adap->tid_release_head;
3317 /* Low 2 bits encode the Tx channel number */
3318 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3319 if (!adap->tid_release_task_busy) {
3320 adap->tid_release_task_busy = true;
3321 queue_work(workq, &adap->tid_release_task);
3323 spin_unlock_bh(&adap->tid_release_lock);
3327 * Process the list of pending TID release requests.
3329 static void process_tid_release_list(struct work_struct *work)
3331 struct sk_buff *skb;
3332 struct adapter *adap;
3334 adap = container_of(work, struct adapter, tid_release_task);
3336 spin_lock_bh(&adap->tid_release_lock);
3337 while (adap->tid_release_head) {
3338 void **p = adap->tid_release_head;
3339 unsigned int chan = (uintptr_t)p & 3;
3340 p = (void *)p - chan;
3342 adap->tid_release_head = *p;
3344 spin_unlock_bh(&adap->tid_release_lock);
3346 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3348 schedule_timeout_uninterruptible(1);
3350 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3351 t4_ofld_send(adap, skb);
3352 spin_lock_bh(&adap->tid_release_lock);
3354 adap->tid_release_task_busy = false;
3355 spin_unlock_bh(&adap->tid_release_lock);
3359 * Release a TID and inform HW. If we are unable to allocate the release
3360 * message we defer to a work queue.
3362 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3365 struct sk_buff *skb;
3366 struct adapter *adap = container_of(t, struct adapter, tids);
3368 old = t->tid_tab[tid];
3369 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3371 t->tid_tab[tid] = NULL;
3372 mk_tid_release(skb, chan, tid);
3373 t4_ofld_send(adap, skb);
3375 cxgb4_queue_tid_release(t, chan, tid);
3377 atomic_dec(&t->tids_in_use);
3379 EXPORT_SYMBOL(cxgb4_remove_tid);
3382 * Allocate and initialize the TID tables. Returns 0 on success.
3384 static int tid_init(struct tid_info *t)
3387 unsigned int stid_bmap_size;
3388 unsigned int natids = t->natids;
3389 struct adapter *adap = container_of(t, struct adapter, tids);
3391 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3392 size = t->ntids * sizeof(*t->tid_tab) +
3393 natids * sizeof(*t->atid_tab) +
3394 t->nstids * sizeof(*t->stid_tab) +
3395 t->nsftids * sizeof(*t->stid_tab) +
3396 stid_bmap_size * sizeof(long) +
3397 t->nftids * sizeof(*t->ftid_tab) +
3398 t->nsftids * sizeof(*t->ftid_tab);
3400 t->tid_tab = t4_alloc_mem(size);
3404 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3405 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3406 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3407 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3408 spin_lock_init(&t->stid_lock);
3409 spin_lock_init(&t->atid_lock);
3411 t->stids_in_use = 0;
3413 t->atids_in_use = 0;
3414 atomic_set(&t->tids_in_use, 0);
3416 /* Setup the free list for atid_tab and clear the stid bitmap. */
3419 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3420 t->afree = t->atid_tab;
3422 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3423 /* Reserve stid 0 for T4/T5 adapters */
3424 if (!t->stid_base &&
3425 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3426 __set_bit(0, t->stid_bmap);
3431 static int cxgb4_clip_get(const struct net_device *dev,
3432 const struct in6_addr *lip)
3434 struct adapter *adap;
3435 struct fw_clip_cmd c;
3437 adap = netdev2adap(dev);
3438 memset(&c, 0, sizeof(c));
3439 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3440 FW_CMD_REQUEST | FW_CMD_WRITE);
3441 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3442 c.ip_hi = *(__be64 *)(lip->s6_addr);
3443 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3444 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3447 static int cxgb4_clip_release(const struct net_device *dev,
3448 const struct in6_addr *lip)
3450 struct adapter *adap;
3451 struct fw_clip_cmd c;
3453 adap = netdev2adap(dev);
3454 memset(&c, 0, sizeof(c));
3455 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3456 FW_CMD_REQUEST | FW_CMD_READ);
3457 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3458 c.ip_hi = *(__be64 *)(lip->s6_addr);
3459 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3460 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3464 * cxgb4_create_server - create an IP server
3466 * @stid: the server TID
3467 * @sip: local IP address to bind server to
3468 * @sport: the server's TCP port
3469 * @queue: queue to direct messages from this server to
3471 * Create an IP server for the given port and address.
3472 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3474 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3475 __be32 sip, __be16 sport, __be16 vlan,
3479 struct sk_buff *skb;
3480 struct adapter *adap;
3481 struct cpl_pass_open_req *req;
3484 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3488 adap = netdev2adap(dev);
3489 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3491 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3492 req->local_port = sport;
3493 req->peer_port = htons(0);
3494 req->local_ip = sip;
3495 req->peer_ip = htonl(0);
3496 chan = rxq_to_chan(&adap->sge, queue);
3497 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3498 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3499 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3500 ret = t4_mgmt_tx(adap, skb);
3501 return net_xmit_eval(ret);
3503 EXPORT_SYMBOL(cxgb4_create_server);
3505 /* cxgb4_create_server6 - create an IPv6 server
3507 * @stid: the server TID
3508 * @sip: local IPv6 address to bind server to
3509 * @sport: the server's TCP port
3510 * @queue: queue to direct messages from this server to
3512 * Create an IPv6 server for the given port and address.
3513 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3515 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3516 const struct in6_addr *sip, __be16 sport,
3520 struct sk_buff *skb;
3521 struct adapter *adap;
3522 struct cpl_pass_open_req6 *req;
3525 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3529 adap = netdev2adap(dev);
3530 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3532 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3533 req->local_port = sport;
3534 req->peer_port = htons(0);
3535 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3536 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3537 req->peer_ip_hi = cpu_to_be64(0);
3538 req->peer_ip_lo = cpu_to_be64(0);
3539 chan = rxq_to_chan(&adap->sge, queue);
3540 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3541 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3542 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3543 ret = t4_mgmt_tx(adap, skb);
3544 return net_xmit_eval(ret);
3546 EXPORT_SYMBOL(cxgb4_create_server6);
3548 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3549 unsigned int queue, bool ipv6)
3551 struct sk_buff *skb;
3552 struct adapter *adap;
3553 struct cpl_close_listsvr_req *req;
3556 adap = netdev2adap(dev);
3558 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3562 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3564 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3565 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3566 LISTSVR_IPV6(0)) | QUEUENO(queue));
3567 ret = t4_mgmt_tx(adap, skb);
3568 return net_xmit_eval(ret);
3570 EXPORT_SYMBOL(cxgb4_remove_server);
3573 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3574 * @mtus: the HW MTU table
3575 * @mtu: the target MTU
3576 * @idx: index of selected entry in the MTU table
3578 * Returns the index and the value in the HW MTU table that is closest to
3579 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3580 * table, in which case that smallest available value is selected.
3582 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3587 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3593 EXPORT_SYMBOL(cxgb4_best_mtu);
3596 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3597 * @mtus: the HW MTU table
3598 * @header_size: Header Size
3599 * @data_size_max: maximum Data Segment Size
3600 * @data_size_align: desired Data Segment Size Alignment (2^N)
3601 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3603 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3604 * MTU Table based solely on a Maximum MTU parameter, we break that
3605 * parameter up into a Header Size and Maximum Data Segment Size, and
3606 * provide a desired Data Segment Size Alignment. If we find an MTU in
3607 * the Hardware MTU Table which will result in a Data Segment Size with
3608 * the requested alignment _and_ that MTU isn't "too far" from the
3609 * closest MTU, then we'll return that rather than the closest MTU.
3611 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3612 unsigned short header_size,
3613 unsigned short data_size_max,
3614 unsigned short data_size_align,
3615 unsigned int *mtu_idxp)
3617 unsigned short max_mtu = header_size + data_size_max;
3618 unsigned short data_size_align_mask = data_size_align - 1;
3619 int mtu_idx, aligned_mtu_idx;
3621 /* Scan the MTU Table till we find an MTU which is larger than our
3622 * Maximum MTU or we reach the end of the table. Along the way,
3623 * record the last MTU found, if any, which will result in a Data
3624 * Segment Length matching the requested alignment.
3626 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3627 unsigned short data_size = mtus[mtu_idx] - header_size;
3629 /* If this MTU minus the Header Size would result in a
3630 * Data Segment Size of the desired alignment, remember it.
3632 if ((data_size & data_size_align_mask) == 0)
3633 aligned_mtu_idx = mtu_idx;
3635 /* If we're not at the end of the Hardware MTU Table and the
3636 * next element is larger than our Maximum MTU, drop out of
3639 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3643 /* If we fell out of the loop because we ran to the end of the table,
3644 * then we just have to use the last [largest] entry.
3646 if (mtu_idx == NMTUS)
3649 /* If we found an MTU which resulted in the requested Data Segment
3650 * Length alignment and that's "not far" from the largest MTU which is
3651 * less than or equal to the maximum MTU, then use that.
3653 if (aligned_mtu_idx >= 0 &&
3654 mtu_idx - aligned_mtu_idx <= 1)
3655 mtu_idx = aligned_mtu_idx;
3657 /* If the caller has passed in an MTU Index pointer, pass the
3658 * MTU Index back. Return the MTU value.
3661 *mtu_idxp = mtu_idx;
3662 return mtus[mtu_idx];
3664 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
3667 * cxgb4_port_chan - get the HW channel of a port
3668 * @dev: the net device for the port
3670 * Return the HW Tx channel of the given port.
3672 unsigned int cxgb4_port_chan(const struct net_device *dev)
3674 return netdev2pinfo(dev)->tx_chan;
3676 EXPORT_SYMBOL(cxgb4_port_chan);
3678 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3680 struct adapter *adap = netdev2adap(dev);
3681 u32 v1, v2, lp_count, hp_count;
3683 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3684 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3685 if (is_t4(adap->params.chip)) {
3686 lp_count = G_LP_COUNT(v1);
3687 hp_count = G_HP_COUNT(v1);
3689 lp_count = G_LP_COUNT_T5(v1);
3690 hp_count = G_HP_COUNT_T5(v2);
3692 return lpfifo ? lp_count : hp_count;
3694 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3697 * cxgb4_port_viid - get the VI id of a port
3698 * @dev: the net device for the port
3700 * Return the VI id of the given port.
3702 unsigned int cxgb4_port_viid(const struct net_device *dev)
3704 return netdev2pinfo(dev)->viid;
3706 EXPORT_SYMBOL(cxgb4_port_viid);
3709 * cxgb4_port_idx - get the index of a port
3710 * @dev: the net device for the port
3712 * Return the index of the given port.
3714 unsigned int cxgb4_port_idx(const struct net_device *dev)
3716 return netdev2pinfo(dev)->port_id;
3718 EXPORT_SYMBOL(cxgb4_port_idx);
3720 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3721 struct tp_tcp_stats *v6)
3723 struct adapter *adap = pci_get_drvdata(pdev);
3725 spin_lock(&adap->stats_lock);
3726 t4_tp_get_tcp_stats(adap, v4, v6);
3727 spin_unlock(&adap->stats_lock);
3729 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3731 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3732 const unsigned int *pgsz_order)
3734 struct adapter *adap = netdev2adap(dev);
3736 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3737 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3738 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3739 HPZ3(pgsz_order[3]));
3741 EXPORT_SYMBOL(cxgb4_iscsi_init);
3743 int cxgb4_flush_eq_cache(struct net_device *dev)
3745 struct adapter *adap = netdev2adap(dev);
3748 ret = t4_fwaddrspace_write(adap, adap->mbox,
3749 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3752 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3754 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3756 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3760 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3762 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3763 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3768 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3771 struct adapter *adap = netdev2adap(dev);
3772 u16 hw_pidx, hw_cidx;
3775 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3779 if (pidx != hw_pidx) {
3782 if (pidx >= hw_pidx)
3783 delta = pidx - hw_pidx;
3785 delta = size - hw_pidx + pidx;
3787 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3788 QID(qid) | PIDX(delta));
3793 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3795 void cxgb4_disable_db_coalescing(struct net_device *dev)
3797 struct adapter *adap;
3799 adap = netdev2adap(dev);
3800 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3803 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3805 void cxgb4_enable_db_coalescing(struct net_device *dev)
3807 struct adapter *adap;
3809 adap = netdev2adap(dev);
3810 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3812 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3814 static struct pci_driver cxgb4_driver;
3816 static void check_neigh_update(struct neighbour *neigh)
3818 const struct device *parent;
3819 const struct net_device *netdev = neigh->dev;
3821 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3822 netdev = vlan_dev_real_dev(netdev);
3823 parent = netdev->dev.parent;
3824 if (parent && parent->driver == &cxgb4_driver.driver)
3825 t4_l2t_update(dev_get_drvdata(parent), neigh);
3828 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3832 case NETEVENT_NEIGH_UPDATE:
3833 check_neigh_update(data);
3835 case NETEVENT_REDIRECT:
3842 static bool netevent_registered;
3843 static struct notifier_block cxgb4_netevent_nb = {
3844 .notifier_call = netevent_cb
3847 static void drain_db_fifo(struct adapter *adap, int usecs)
3849 u32 v1, v2, lp_count, hp_count;
3852 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3853 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3854 if (is_t4(adap->params.chip)) {
3855 lp_count = G_LP_COUNT(v1);
3856 hp_count = G_HP_COUNT(v1);
3858 lp_count = G_LP_COUNT_T5(v1);
3859 hp_count = G_HP_COUNT_T5(v2);
3862 if (lp_count == 0 && hp_count == 0)
3864 set_current_state(TASK_UNINTERRUPTIBLE);
3865 schedule_timeout(usecs_to_jiffies(usecs));
3869 static void disable_txq_db(struct sge_txq *q)
3871 unsigned long flags;
3873 spin_lock_irqsave(&q->db_lock, flags);
3875 spin_unlock_irqrestore(&q->db_lock, flags);
3878 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
3880 spin_lock_irq(&q->db_lock);
3881 if (q->db_pidx_inc) {
3882 /* Make sure that all writes to the TX descriptors
3883 * are committed before we tell HW about them.
3886 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3887 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
3891 spin_unlock_irq(&q->db_lock);
3894 static void disable_dbs(struct adapter *adap)
3898 for_each_ethrxq(&adap->sge, i)
3899 disable_txq_db(&adap->sge.ethtxq[i].q);
3900 for_each_ofldrxq(&adap->sge, i)
3901 disable_txq_db(&adap->sge.ofldtxq[i].q);
3902 for_each_port(adap, i)
3903 disable_txq_db(&adap->sge.ctrlq[i].q);
3906 static void enable_dbs(struct adapter *adap)
3910 for_each_ethrxq(&adap->sge, i)
3911 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
3912 for_each_ofldrxq(&adap->sge, i)
3913 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
3914 for_each_port(adap, i)
3915 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
3918 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3920 if (adap->uld_handle[CXGB4_ULD_RDMA])
3921 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3925 static void process_db_full(struct work_struct *work)
3927 struct adapter *adap;
3929 adap = container_of(work, struct adapter, db_full_task);
3931 drain_db_fifo(adap, dbfifo_drain_delay);
3933 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3934 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3935 DBFIFO_HP_INT | DBFIFO_LP_INT,
3936 DBFIFO_HP_INT | DBFIFO_LP_INT);
3939 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3941 u16 hw_pidx, hw_cidx;
3944 spin_lock_irq(&q->db_lock);
3945 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3948 if (q->db_pidx != hw_pidx) {
3951 if (q->db_pidx >= hw_pidx)
3952 delta = q->db_pidx - hw_pidx;
3954 delta = q->size - hw_pidx + q->db_pidx;
3956 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3957 QID(q->cntxt_id) | PIDX(delta));
3962 spin_unlock_irq(&q->db_lock);
3964 CH_WARN(adap, "DB drop recovery failed.\n");
3966 static void recover_all_queues(struct adapter *adap)
3970 for_each_ethrxq(&adap->sge, i)
3971 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3972 for_each_ofldrxq(&adap->sge, i)
3973 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3974 for_each_port(adap, i)
3975 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3978 static void process_db_drop(struct work_struct *work)
3980 struct adapter *adap;
3982 adap = container_of(work, struct adapter, db_drop_task);
3984 if (is_t4(adap->params.chip)) {
3985 drain_db_fifo(adap, dbfifo_drain_delay);
3986 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3987 drain_db_fifo(adap, dbfifo_drain_delay);
3988 recover_all_queues(adap);
3989 drain_db_fifo(adap, dbfifo_drain_delay);
3991 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3993 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3994 u16 qid = (dropped_db >> 15) & 0x1ffff;
3995 u16 pidx_inc = dropped_db & 0x1fff;
3997 unsigned short udb_density;
3998 unsigned long qpshift;
4002 dev_warn(adap->pdev_dev,
4003 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
4005 (dropped_db >> 14) & 1,
4006 (dropped_db >> 13) & 1,
4009 drain_db_fifo(adap, 1);
4011 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
4012 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
4013 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
4014 qpshift = PAGE_SHIFT - ilog2(udb_density);
4015 udb = qid << qpshift;
4017 page = udb / PAGE_SIZE;
4018 udb += (qid - (page * udb_density)) * 128;
4020 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
4022 /* Re-enable BAR2 WC */
4023 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4026 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
4029 void t4_db_full(struct adapter *adap)
4031 if (is_t4(adap->params.chip)) {
4033 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4034 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4035 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
4036 queue_work(workq, &adap->db_full_task);
4040 void t4_db_dropped(struct adapter *adap)
4042 if (is_t4(adap->params.chip)) {
4044 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4046 queue_work(workq, &adap->db_drop_task);
4049 static void uld_attach(struct adapter *adap, unsigned int uld)
4052 struct cxgb4_lld_info lli;
4055 lli.pdev = adap->pdev;
4056 lli.l2t = adap->l2t;
4057 lli.tids = &adap->tids;
4058 lli.ports = adap->port;
4059 lli.vr = &adap->vres;
4060 lli.mtus = adap->params.mtus;
4061 if (uld == CXGB4_ULD_RDMA) {
4062 lli.rxq_ids = adap->sge.rdma_rxq;
4063 lli.ciq_ids = adap->sge.rdma_ciq;
4064 lli.nrxq = adap->sge.rdmaqs;
4065 lli.nciq = adap->sge.rdmaciqs;
4066 } else if (uld == CXGB4_ULD_ISCSI) {
4067 lli.rxq_ids = adap->sge.ofld_rxq;
4068 lli.nrxq = adap->sge.ofldqsets;
4070 lli.ntxq = adap->sge.ofldqsets;
4071 lli.nchan = adap->params.nports;
4072 lli.nports = adap->params.nports;
4073 lli.wr_cred = adap->params.ofldq_wr_cred;
4074 lli.adapter_type = adap->params.chip;
4075 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
4076 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
4077 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
4079 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
4080 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
4082 lli.filt_mode = adap->params.tp.vlan_pri_map;
4083 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
4084 for (i = 0; i < NCHAN; i++)
4086 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
4087 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
4088 lli.fw_vers = adap->params.fw_vers;
4089 lli.dbfifo_int_thresh = dbfifo_int_thresh;
4090 lli.sge_pktshift = adap->sge.pktshift;
4091 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
4092 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
4094 handle = ulds[uld].add(&lli);
4095 if (IS_ERR(handle)) {
4096 dev_warn(adap->pdev_dev,
4097 "could not attach to the %s driver, error %ld\n",
4098 uld_str[uld], PTR_ERR(handle));
4102 adap->uld_handle[uld] = handle;
4104 if (!netevent_registered) {
4105 register_netevent_notifier(&cxgb4_netevent_nb);
4106 netevent_registered = true;
4109 if (adap->flags & FULL_INIT_DONE)
4110 ulds[uld].state_change(handle, CXGB4_STATE_UP);
4113 static void attach_ulds(struct adapter *adap)
4117 spin_lock(&adap_rcu_lock);
4118 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
4119 spin_unlock(&adap_rcu_lock);
4121 mutex_lock(&uld_mutex);
4122 list_add_tail(&adap->list_node, &adapter_list);
4123 for (i = 0; i < CXGB4_ULD_MAX; i++)
4125 uld_attach(adap, i);
4126 mutex_unlock(&uld_mutex);
4129 static void detach_ulds(struct adapter *adap)
4133 mutex_lock(&uld_mutex);
4134 list_del(&adap->list_node);
4135 for (i = 0; i < CXGB4_ULD_MAX; i++)
4136 if (adap->uld_handle[i]) {
4137 ulds[i].state_change(adap->uld_handle[i],
4138 CXGB4_STATE_DETACH);
4139 adap->uld_handle[i] = NULL;
4141 if (netevent_registered && list_empty(&adapter_list)) {
4142 unregister_netevent_notifier(&cxgb4_netevent_nb);
4143 netevent_registered = false;
4145 mutex_unlock(&uld_mutex);
4147 spin_lock(&adap_rcu_lock);
4148 list_del_rcu(&adap->rcu_node);
4149 spin_unlock(&adap_rcu_lock);
4152 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4156 mutex_lock(&uld_mutex);
4157 for (i = 0; i < CXGB4_ULD_MAX; i++)
4158 if (adap->uld_handle[i])
4159 ulds[i].state_change(adap->uld_handle[i], new_state);
4160 mutex_unlock(&uld_mutex);
4164 * cxgb4_register_uld - register an upper-layer driver
4165 * @type: the ULD type
4166 * @p: the ULD methods
4168 * Registers an upper-layer driver with this driver and notifies the ULD
4169 * about any presently available devices that support its type. Returns
4170 * %-EBUSY if a ULD of the same type is already registered.
4172 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4175 struct adapter *adap;
4177 if (type >= CXGB4_ULD_MAX)
4179 mutex_lock(&uld_mutex);
4180 if (ulds[type].add) {
4185 list_for_each_entry(adap, &adapter_list, list_node)
4186 uld_attach(adap, type);
4187 out: mutex_unlock(&uld_mutex);
4190 EXPORT_SYMBOL(cxgb4_register_uld);
4193 * cxgb4_unregister_uld - unregister an upper-layer driver
4194 * @type: the ULD type
4196 * Unregisters an existing upper-layer driver.
4198 int cxgb4_unregister_uld(enum cxgb4_uld type)
4200 struct adapter *adap;
4202 if (type >= CXGB4_ULD_MAX)
4204 mutex_lock(&uld_mutex);
4205 list_for_each_entry(adap, &adapter_list, list_node)
4206 adap->uld_handle[type] = NULL;
4207 ulds[type].add = NULL;
4208 mutex_unlock(&uld_mutex);
4211 EXPORT_SYMBOL(cxgb4_unregister_uld);
4213 /* Check if netdev on which event is occured belongs to us or not. Return
4214 * suceess (1) if it belongs otherwise failure (0).
4216 static int cxgb4_netdev(struct net_device *netdev)
4218 struct adapter *adap;
4221 spin_lock(&adap_rcu_lock);
4222 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4223 for (i = 0; i < MAX_NPORTS; i++)
4224 if (adap->port[i] == netdev) {
4225 spin_unlock(&adap_rcu_lock);
4228 spin_unlock(&adap_rcu_lock);
4232 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4233 unsigned long event)
4235 int ret = NOTIFY_DONE;
4238 if (cxgb4_netdev(event_dev)) {
4241 ret = cxgb4_clip_get(event_dev,
4242 (const struct in6_addr *)ifa->addr.s6_addr);
4250 cxgb4_clip_release(event_dev,
4251 (const struct in6_addr *)ifa->addr.s6_addr);
4262 static int cxgb4_inet6addr_handler(struct notifier_block *this,
4263 unsigned long event, void *data)
4265 struct inet6_ifaddr *ifa = data;
4266 struct net_device *event_dev;
4267 int ret = NOTIFY_DONE;
4268 struct bonding *bond = netdev_priv(ifa->idev->dev);
4269 struct list_head *iter;
4270 struct slave *slave;
4271 struct pci_dev *first_pdev = NULL;
4273 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4274 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4275 ret = clip_add(event_dev, ifa, event);
4276 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4277 /* It is possible that two different adapters are bonded in one
4278 * bond. We need to find such different adapters and add clip
4279 * in all of them only once.
4281 read_lock(&bond->lock);
4282 bond_for_each_slave(bond, slave, iter) {
4284 ret = clip_add(slave->dev, ifa, event);
4285 /* If clip_add is success then only initialize
4286 * first_pdev since it means it is our device
4288 if (ret == NOTIFY_OK)
4289 first_pdev = to_pci_dev(
4290 slave->dev->dev.parent);
4291 } else if (first_pdev !=
4292 to_pci_dev(slave->dev->dev.parent))
4293 ret = clip_add(slave->dev, ifa, event);
4295 read_unlock(&bond->lock);
4297 ret = clip_add(ifa->idev->dev, ifa, event);
4302 static struct notifier_block cxgb4_inet6addr_notifier = {
4303 .notifier_call = cxgb4_inet6addr_handler
4306 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
4307 * a physical device.
4308 * The physical device reference is needed to send the actul CLIP command.
4310 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
4312 struct inet6_dev *idev = NULL;
4313 struct inet6_ifaddr *ifa;
4316 idev = __in6_dev_get(root_dev);
4320 read_lock_bh(&idev->lock);
4321 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4322 ret = cxgb4_clip_get(dev,
4323 (const struct in6_addr *)ifa->addr.s6_addr);
4327 read_unlock_bh(&idev->lock);
4332 static int update_root_dev_clip(struct net_device *dev)
4334 struct net_device *root_dev = NULL;
4337 /* First populate the real net device's IPv6 addresses */
4338 ret = update_dev_clip(dev, dev);
4342 /* Parse all bond and vlan devices layered on top of the physical dev */
4343 for (i = 0; i < VLAN_N_VID; i++) {
4344 root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
4348 ret = update_dev_clip(root_dev, dev);
4355 static void update_clip(const struct adapter *adap)
4358 struct net_device *dev;
4363 for (i = 0; i < MAX_NPORTS; i++) {
4364 dev = adap->port[i];
4368 ret = update_root_dev_clip(dev);
4377 * cxgb_up - enable the adapter
4378 * @adap: adapter being enabled
4380 * Called when the first port is enabled, this function performs the
4381 * actions necessary to make an adapter operational, such as completing
4382 * the initialization of HW modules, and enabling interrupts.
4384 * Must be called with the rtnl lock held.
4386 static int cxgb_up(struct adapter *adap)
4390 err = setup_sge_queues(adap);
4393 err = setup_rss(adap);
4397 if (adap->flags & USING_MSIX) {
4398 name_msix_vecs(adap);
4399 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4400 adap->msix_info[0].desc, adap);
4404 err = request_msix_queue_irqs(adap);
4406 free_irq(adap->msix_info[0].vec, adap);
4410 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4411 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4412 adap->port[0]->name, adap);
4418 t4_intr_enable(adap);
4419 adap->flags |= FULL_INIT_DONE;
4420 notify_ulds(adap, CXGB4_STATE_UP);
4425 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4427 t4_free_sge_resources(adap);
4431 static void cxgb_down(struct adapter *adapter)
4433 t4_intr_disable(adapter);
4434 cancel_work_sync(&adapter->tid_release_task);
4435 cancel_work_sync(&adapter->db_full_task);
4436 cancel_work_sync(&adapter->db_drop_task);
4437 adapter->tid_release_task_busy = false;
4438 adapter->tid_release_head = NULL;
4440 if (adapter->flags & USING_MSIX) {
4441 free_msix_queue_irqs(adapter);
4442 free_irq(adapter->msix_info[0].vec, adapter);
4444 free_irq(adapter->pdev->irq, adapter);
4445 quiesce_rx(adapter);
4446 t4_sge_stop(adapter);
4447 t4_free_sge_resources(adapter);
4448 adapter->flags &= ~FULL_INIT_DONE;
4452 * net_device operations
4454 static int cxgb_open(struct net_device *dev)
4457 struct port_info *pi = netdev_priv(dev);
4458 struct adapter *adapter = pi->adapter;
4460 netif_carrier_off(dev);
4462 if (!(adapter->flags & FULL_INIT_DONE)) {
4463 err = cxgb_up(adapter);
4468 err = link_start(dev);
4470 netif_tx_start_all_queues(dev);
4474 static int cxgb_close(struct net_device *dev)
4476 struct port_info *pi = netdev_priv(dev);
4477 struct adapter *adapter = pi->adapter;
4479 netif_tx_stop_all_queues(dev);
4480 netif_carrier_off(dev);
4481 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4484 /* Return an error number if the indicated filter isn't writable ...
4486 static int writable_filter(struct filter_entry *f)
4496 /* Delete the filter at the specified index (if valid). The checks for all
4497 * the common problems with doing this like the filter being locked, currently
4498 * pending in another operation, etc.
4500 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4502 struct filter_entry *f;
4505 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4508 f = &adapter->tids.ftid_tab[fidx];
4509 ret = writable_filter(f);
4513 return del_filter_wr(adapter, fidx);
4518 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4519 __be32 sip, __be16 sport, __be16 vlan,
4520 unsigned int queue, unsigned char port, unsigned char mask)
4523 struct filter_entry *f;
4524 struct adapter *adap;
4528 adap = netdev2adap(dev);
4530 /* Adjust stid to correct filter index */
4531 stid -= adap->tids.sftid_base;
4532 stid += adap->tids.nftids;
4534 /* Check to make sure the filter requested is writable ...
4536 f = &adap->tids.ftid_tab[stid];
4537 ret = writable_filter(f);
4541 /* Clear out any old resources being used by the filter before
4542 * we start constructing the new filter.
4545 clear_filter(adap, f);
4547 /* Clear out filter specifications */
4548 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4549 f->fs.val.lport = cpu_to_be16(sport);
4550 f->fs.mask.lport = ~0;
4552 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4553 for (i = 0; i < 4; i++) {
4554 f->fs.val.lip[i] = val[i];
4555 f->fs.mask.lip[i] = ~0;
4557 if (adap->params.tp.vlan_pri_map & F_PORT) {
4558 f->fs.val.iport = port;
4559 f->fs.mask.iport = mask;
4563 if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
4564 f->fs.val.proto = IPPROTO_TCP;
4565 f->fs.mask.proto = ~0;
4570 /* Mark filter as locked */
4574 ret = set_filter_wr(adap, stid);
4576 clear_filter(adap, f);
4582 EXPORT_SYMBOL(cxgb4_create_server_filter);
4584 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4585 unsigned int queue, bool ipv6)
4588 struct filter_entry *f;
4589 struct adapter *adap;
4591 adap = netdev2adap(dev);
4593 /* Adjust stid to correct filter index */
4594 stid -= adap->tids.sftid_base;
4595 stid += adap->tids.nftids;
4597 f = &adap->tids.ftid_tab[stid];
4598 /* Unlock the filter */
4601 ret = delete_filter(adap, stid);
4607 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4609 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4610 struct rtnl_link_stats64 *ns)
4612 struct port_stats stats;
4613 struct port_info *p = netdev_priv(dev);
4614 struct adapter *adapter = p->adapter;
4616 /* Block retrieving statistics during EEH error
4617 * recovery. Otherwise, the recovery might fail
4618 * and the PCI device will be removed permanently
4620 spin_lock(&adapter->stats_lock);
4621 if (!netif_device_present(dev)) {
4622 spin_unlock(&adapter->stats_lock);
4625 t4_get_port_stats(adapter, p->tx_chan, &stats);
4626 spin_unlock(&adapter->stats_lock);
4628 ns->tx_bytes = stats.tx_octets;
4629 ns->tx_packets = stats.tx_frames;
4630 ns->rx_bytes = stats.rx_octets;
4631 ns->rx_packets = stats.rx_frames;
4632 ns->multicast = stats.rx_mcast_frames;
4634 /* detailed rx_errors */
4635 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4637 ns->rx_over_errors = 0;
4638 ns->rx_crc_errors = stats.rx_fcs_err;
4639 ns->rx_frame_errors = stats.rx_symbol_err;
4640 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4641 stats.rx_ovflow2 + stats.rx_ovflow3 +
4642 stats.rx_trunc0 + stats.rx_trunc1 +
4643 stats.rx_trunc2 + stats.rx_trunc3;
4644 ns->rx_missed_errors = 0;
4646 /* detailed tx_errors */
4647 ns->tx_aborted_errors = 0;
4648 ns->tx_carrier_errors = 0;
4649 ns->tx_fifo_errors = 0;
4650 ns->tx_heartbeat_errors = 0;
4651 ns->tx_window_errors = 0;
4653 ns->tx_errors = stats.tx_error_frames;
4654 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4655 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4659 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4662 int ret = 0, prtad, devad;
4663 struct port_info *pi = netdev_priv(dev);
4664 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4668 if (pi->mdio_addr < 0)
4670 data->phy_id = pi->mdio_addr;
4674 if (mdio_phy_id_is_c45(data->phy_id)) {
4675 prtad = mdio_phy_id_prtad(data->phy_id);
4676 devad = mdio_phy_id_devad(data->phy_id);
4677 } else if (data->phy_id < 32) {
4678 prtad = data->phy_id;
4680 data->reg_num &= 0x1f;
4684 mbox = pi->adapter->fn;
4685 if (cmd == SIOCGMIIREG)
4686 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4687 data->reg_num, &data->val_out);
4689 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4690 data->reg_num, data->val_in);
4698 static void cxgb_set_rxmode(struct net_device *dev)
4700 /* unfortunately we can't return errors to the stack */
4701 set_rxmode(dev, -1, false);
4704 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4707 struct port_info *pi = netdev_priv(dev);
4709 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4711 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4718 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4721 struct sockaddr *addr = p;
4722 struct port_info *pi = netdev_priv(dev);
4724 if (!is_valid_ether_addr(addr->sa_data))
4725 return -EADDRNOTAVAIL;
4727 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4728 pi->xact_addr_filt, addr->sa_data, true, true);
4732 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4733 pi->xact_addr_filt = ret;
4737 #ifdef CONFIG_NET_POLL_CONTROLLER
4738 static void cxgb_netpoll(struct net_device *dev)
4740 struct port_info *pi = netdev_priv(dev);
4741 struct adapter *adap = pi->adapter;
4743 if (adap->flags & USING_MSIX) {
4745 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4747 for (i = pi->nqsets; i; i--, rx++)
4748 t4_sge_intr_msix(0, &rx->rspq);
4750 t4_intr_handler(adap)(0, adap);
4754 static const struct net_device_ops cxgb4_netdev_ops = {
4755 .ndo_open = cxgb_open,
4756 .ndo_stop = cxgb_close,
4757 .ndo_start_xmit = t4_eth_xmit,
4758 .ndo_select_queue = cxgb_select_queue,
4759 .ndo_get_stats64 = cxgb_get_stats,
4760 .ndo_set_rx_mode = cxgb_set_rxmode,
4761 .ndo_set_mac_address = cxgb_set_mac_addr,
4762 .ndo_set_features = cxgb_set_features,
4763 .ndo_validate_addr = eth_validate_addr,
4764 .ndo_do_ioctl = cxgb_ioctl,
4765 .ndo_change_mtu = cxgb_change_mtu,
4766 #ifdef CONFIG_NET_POLL_CONTROLLER
4767 .ndo_poll_controller = cxgb_netpoll,
4771 void t4_fatal_err(struct adapter *adap)
4773 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4774 t4_intr_disable(adap);
4775 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4778 static void setup_memwin(struct adapter *adap)
4780 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
4782 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
4783 if (is_t4(adap->params.chip)) {
4784 mem_win0_base = bar0 + MEMWIN0_BASE;
4785 mem_win1_base = bar0 + MEMWIN1_BASE;
4786 mem_win2_base = bar0 + MEMWIN2_BASE;
4788 /* For T5, only relative offset inside the PCIe BAR is passed */
4789 mem_win0_base = MEMWIN0_BASE;
4790 mem_win1_base = MEMWIN1_BASE_T5;
4791 mem_win2_base = MEMWIN2_BASE_T5;
4793 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4794 mem_win0_base | BIR(0) |
4795 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4796 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4797 mem_win1_base | BIR(0) |
4798 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4799 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4800 mem_win2_base | BIR(0) |
4801 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
4804 static void setup_memwin_rdma(struct adapter *adap)
4806 if (adap->vres.ocq.size) {
4807 unsigned int start, sz_kb;
4809 start = pci_resource_start(adap->pdev, 2) +
4810 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4811 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4813 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4814 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4816 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4817 adap->vres.ocq.start);
4819 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4823 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4828 /* get device capabilities */
4829 memset(c, 0, sizeof(*c));
4830 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4831 FW_CMD_REQUEST | FW_CMD_READ);
4832 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4833 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4837 /* select capabilities we'll be using */
4838 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4840 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4842 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4843 } else if (vf_acls) {
4844 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4847 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4848 FW_CMD_REQUEST | FW_CMD_WRITE);
4849 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4853 ret = t4_config_glbl_rss(adap, adap->fn,
4854 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4855 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4856 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4860 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4861 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4867 /* tweak some settings */
4868 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4869 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4870 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4871 v = t4_read_reg(adap, TP_PIO_DATA);
4872 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4874 /* first 4 Tx modulation queues point to consecutive Tx channels */
4875 adap->params.tp.tx_modq_map = 0xE4;
4876 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4877 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4879 /* associate each Tx modulation queue with consecutive Tx channels */
4881 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4882 &v, 1, A_TP_TX_SCHED_HDR);
4883 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4884 &v, 1, A_TP_TX_SCHED_FIFO);
4885 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4886 &v, 1, A_TP_TX_SCHED_PCMD);
4888 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4889 if (is_offload(adap)) {
4890 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4891 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4892 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4893 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4894 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4895 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4896 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4897 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4898 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4899 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4902 /* get basic stuff going */
4903 return t4_early_init(adap, adap->fn);
4907 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4909 #define MAX_ATIDS 8192U
4912 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4914 * If the firmware we're dealing with has Configuration File support, then
4915 * we use that to perform all configuration
4919 * Tweak configuration based on module parameters, etc. Most of these have
4920 * defaults assigned to them by Firmware Configuration Files (if we're using
4921 * them) but need to be explicitly set if we're using hard-coded
4922 * initialization. But even in the case of using Firmware Configuration
4923 * Files, we'd like to expose the ability to change these via module
4924 * parameters so these are essentially common tweaks/settings for
4925 * Configuration Files and hard-coded initialization ...
4927 static int adap_init0_tweaks(struct adapter *adapter)
4930 * Fix up various Host-Dependent Parameters like Page Size, Cache
4931 * Line Size, etc. The firmware default is for a 4KB Page Size and
4932 * 64B Cache Line Size ...
4934 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4937 * Process module parameters which affect early initialization.
4939 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4940 dev_err(&adapter->pdev->dev,
4941 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4945 t4_set_reg_field(adapter, SGE_CONTROL,
4947 PKTSHIFT(rx_dma_offset));
4950 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4951 * adds the pseudo header itself.
4953 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4954 CSUM_HAS_PSEUDO_HDR, 0);
4960 * Attempt to initialize the adapter via a Firmware Configuration File.
4962 static int adap_init0_config(struct adapter *adapter, int reset)
4964 struct fw_caps_config_cmd caps_cmd;
4965 const struct firmware *cf;
4966 unsigned long mtype = 0, maddr = 0;
4967 u32 finiver, finicsum, cfcsum;
4969 int config_issued = 0;
4970 char *fw_config_file, fw_config_file_path[256];
4971 char *config_name = NULL;
4974 * Reset device if necessary.
4977 ret = t4_fw_reset(adapter, adapter->mbox,
4978 PIORSTMODE | PIORST);
4984 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4985 * then use that. Otherwise, use the configuration file stored
4986 * in the adapter flash ...
4988 switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4990 fw_config_file = FW4_CFNAME;
4993 fw_config_file = FW5_CFNAME;
4996 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4997 adapter->pdev->device);
5002 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
5004 config_name = "On FLASH";
5005 mtype = FW_MEMTYPE_CF_FLASH;
5006 maddr = t4_flash_cfg_addr(adapter);
5008 u32 params[7], val[7];
5010 sprintf(fw_config_file_path,
5011 "/lib/firmware/%s", fw_config_file);
5012 config_name = fw_config_file_path;
5014 if (cf->size >= FLASH_CFG_MAX_SIZE)
5017 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5018 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5019 ret = t4_query_params(adapter, adapter->mbox,
5020 adapter->fn, 0, 1, params, val);
5023 * For t4_memory_write() below addresses and
5024 * sizes have to be in terms of multiples of 4
5025 * bytes. So, if the Configuration File isn't
5026 * a multiple of 4 bytes in length we'll have
5027 * to write that out separately since we can't
5028 * guarantee that the bytes following the
5029 * residual byte in the buffer returned by
5030 * request_firmware() are zeroed out ...
5032 size_t resid = cf->size & 0x3;
5033 size_t size = cf->size & ~0x3;
5034 __be32 *data = (__be32 *)cf->data;
5036 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
5037 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
5039 ret = t4_memory_write(adapter, mtype, maddr,
5041 if (ret == 0 && resid != 0) {
5048 last.word = data[size >> 2];
5049 for (i = resid; i < 4; i++)
5051 ret = t4_memory_write(adapter, mtype,
5058 release_firmware(cf);
5064 * Issue a Capability Configuration command to the firmware to get it
5065 * to parse the Configuration File. We don't use t4_fw_config_file()
5066 * because we want the ability to modify various features after we've
5067 * processed the configuration file ...
5069 memset(&caps_cmd, 0, sizeof(caps_cmd));
5070 caps_cmd.op_to_write =
5071 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5074 caps_cmd.cfvalid_to_len16 =
5075 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
5076 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
5077 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
5078 FW_LEN16(caps_cmd));
5079 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5082 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
5083 * Configuration File in FLASH), our last gasp effort is to use the
5084 * Firmware Configuration File which is embedded in the firmware. A
5085 * very few early versions of the firmware didn't have one embedded
5086 * but we can ignore those.
5088 if (ret == -ENOENT) {
5089 memset(&caps_cmd, 0, sizeof(caps_cmd));
5090 caps_cmd.op_to_write =
5091 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5094 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5095 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
5096 sizeof(caps_cmd), &caps_cmd);
5097 config_name = "Firmware Default";
5104 finiver = ntohl(caps_cmd.finiver);
5105 finicsum = ntohl(caps_cmd.finicsum);
5106 cfcsum = ntohl(caps_cmd.cfcsum);
5107 if (finicsum != cfcsum)
5108 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
5109 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
5113 * And now tell the firmware to use the configuration we just loaded.
5115 caps_cmd.op_to_write =
5116 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5119 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5120 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5126 * Tweak configuration based on system architecture, module
5129 ret = adap_init0_tweaks(adapter);
5134 * And finally tell the firmware to initialize itself using the
5135 * parameters from the Configuration File.
5137 ret = t4_fw_initialize(adapter, adapter->mbox);
5142 * Return successfully and note that we're operating with parameters
5143 * not supplied by the driver, rather than from hard-wired
5144 * initialization constants burried in the driver.
5146 adapter->flags |= USING_SOFT_PARAMS;
5147 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
5148 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5149 config_name, finiver, cfcsum);
5153 * Something bad happened. Return the error ... (If the "error"
5154 * is that there's no Configuration File on the adapter we don't
5155 * want to issue a warning since this is fairly common.)
5158 if (config_issued && ret != -ENOENT)
5159 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5165 * Attempt to initialize the adapter via hard-coded, driver supplied
5168 static int adap_init0_no_config(struct adapter *adapter, int reset)
5170 struct sge *s = &adapter->sge;
5171 struct fw_caps_config_cmd caps_cmd;
5176 * Reset device if necessary
5179 ret = t4_fw_reset(adapter, adapter->mbox,
5180 PIORSTMODE | PIORST);
5186 * Get device capabilities and select which we'll be using.
5188 memset(&caps_cmd, 0, sizeof(caps_cmd));
5189 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5190 FW_CMD_REQUEST | FW_CMD_READ);
5191 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5192 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5197 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5199 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5201 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5202 } else if (vf_acls) {
5203 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
5206 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5207 FW_CMD_REQUEST | FW_CMD_WRITE);
5208 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5214 * Tweak configuration based on system architecture, module
5217 ret = adap_init0_tweaks(adapter);
5222 * Select RSS Global Mode we want to use. We use "Basic Virtual"
5223 * mode which maps each Virtual Interface to its own section of
5224 * the RSS Table and we turn on all map and hash enables ...
5226 adapter->flags |= RSS_TNLALLLOOKUP;
5227 ret = t4_config_glbl_rss(adapter, adapter->mbox,
5228 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5229 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5230 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
5231 ((adapter->flags & RSS_TNLALLLOOKUP) ?
5232 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
5237 * Set up our own fundamental resource provisioning ...
5239 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
5240 PFRES_NEQ, PFRES_NETHCTRL,
5241 PFRES_NIQFLINT, PFRES_NIQ,
5242 PFRES_TC, PFRES_NVI,
5243 FW_PFVF_CMD_CMASK_MASK,
5244 pfvfres_pmask(adapter, adapter->fn, 0),
5246 PFRES_R_CAPS, PFRES_WX_CAPS);
5251 * Perform low level SGE initialization. We need to do this before we
5252 * send the firmware the INITIALIZE command because that will cause
5253 * any other PF Drivers which are waiting for the Master
5254 * Initialization to proceed forward.
5256 for (i = 0; i < SGE_NTIMERS - 1; i++)
5257 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
5258 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
5259 s->counter_val[0] = 1;
5260 for (i = 1; i < SGE_NCOUNTERS; i++)
5261 s->counter_val[i] = min(intr_cnt[i - 1],
5262 THRESHOLD_0_GET(THRESHOLD_0_MASK));
5263 t4_sge_init(adapter);
5265 #ifdef CONFIG_PCI_IOV
5267 * Provision resource limits for Virtual Functions. We currently
5268 * grant them all the same static resource limits except for the Port
5269 * Access Rights Mask which we're assigning based on the PF. All of
5270 * the static provisioning stuff for both the PF and VF really needs
5271 * to be managed in a persistent manner for each device which the
5272 * firmware controls.
5277 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
5278 if (num_vf[pf] <= 0)
5281 /* VF numbering starts at 1! */
5282 for (vf = 1; vf <= num_vf[pf]; vf++) {
5283 ret = t4_cfg_pfvf(adapter, adapter->mbox,
5285 VFRES_NEQ, VFRES_NETHCTRL,
5286 VFRES_NIQFLINT, VFRES_NIQ,
5287 VFRES_TC, VFRES_NVI,
5288 FW_PFVF_CMD_CMASK_MASK,
5292 VFRES_R_CAPS, VFRES_WX_CAPS);
5294 dev_warn(adapter->pdev_dev,
5296 "provision pf/vf=%d/%d; "
5297 "err=%d\n", pf, vf, ret);
5304 * Set up the default filter mode. Later we'll want to implement this
5305 * via a firmware command, etc. ... This needs to be done before the
5306 * firmare initialization command ... If the selected set of fields
5307 * isn't equal to the default value, we'll need to make sure that the
5308 * field selections will fit in the 36-bit budget.
5310 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
5313 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5314 switch (tp_vlan_pri_map & (1 << j)) {
5316 /* compressed filter field not enabled */
5336 case ETHERTYPE_MASK:
5342 case MPSHITTYPE_MASK:
5345 case FRAGMENTATION_MASK:
5351 dev_err(adapter->pdev_dev,
5352 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5353 " using %#x\n", tp_vlan_pri_map, bits,
5354 TP_VLAN_PRI_MAP_DEFAULT);
5355 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5358 v = tp_vlan_pri_map;
5359 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5360 &v, 1, TP_VLAN_PRI_MAP);
5363 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG order
5364 * to support any of the compressed filter fields above. Newer
5365 * versions of the firmware do this automatically but it doesn't hurt
5366 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5367 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5368 * since the firmware automatically turns this on and off when we have
5369 * a non-zero number of filters active (since it does have a
5370 * performance impact).
5372 if (tp_vlan_pri_map)
5373 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5374 FIVETUPLELOOKUP_MASK,
5375 FIVETUPLELOOKUP_MASK);
5378 * Tweak some settings.
5380 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5381 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5382 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5383 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5386 * Get basic stuff going by issuing the Firmware Initialize command.
5387 * Note that this _must_ be after all PFVF commands ...
5389 ret = t4_fw_initialize(adapter, adapter->mbox);
5394 * Return successfully!
5396 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5397 "driver parameters\n");
5401 * Something bad happened. Return the error ...
5407 static struct fw_info fw_info_array[] = {
5410 .fs_name = FW4_CFNAME,
5411 .fw_mod_name = FW4_FNAME,
5413 .chip = FW_HDR_CHIP_T4,
5414 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5415 .intfver_nic = FW_INTFVER(T4, NIC),
5416 .intfver_vnic = FW_INTFVER(T4, VNIC),
5417 .intfver_ri = FW_INTFVER(T4, RI),
5418 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5419 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5423 .fs_name = FW5_CFNAME,
5424 .fw_mod_name = FW5_FNAME,
5426 .chip = FW_HDR_CHIP_T5,
5427 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5428 .intfver_nic = FW_INTFVER(T5, NIC),
5429 .intfver_vnic = FW_INTFVER(T5, VNIC),
5430 .intfver_ri = FW_INTFVER(T5, RI),
5431 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5432 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5437 static struct fw_info *find_fw_info(int chip)
5441 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5442 if (fw_info_array[i].chip == chip)
5443 return &fw_info_array[i];
5449 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5451 static int adap_init0(struct adapter *adap)
5455 enum dev_state state;
5456 u32 params[7], val[7];
5457 struct fw_caps_config_cmd caps_cmd;
5461 * Contact FW, advertising Master capability (and potentially forcing
5462 * ourselves as the Master PF if our module parameter force_init is
5465 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5466 force_init ? MASTER_MUST : MASTER_MAY,
5469 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5473 if (ret == adap->mbox)
5474 adap->flags |= MASTER_PF;
5475 if (force_init && state == DEV_STATE_INIT)
5476 state = DEV_STATE_UNINIT;
5479 * If we're the Master PF Driver and the device is uninitialized,
5480 * then let's consider upgrading the firmware ... (We always want
5481 * to check the firmware version number in order to A. get it for
5482 * later reporting and B. to warn if the currently loaded firmware
5483 * is excessively mismatched relative to the driver.)
5485 t4_get_fw_version(adap, &adap->params.fw_vers);
5486 t4_get_tp_version(adap, &adap->params.tp_vers);
5487 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5488 struct fw_info *fw_info;
5489 struct fw_hdr *card_fw;
5490 const struct firmware *fw;
5491 const u8 *fw_data = NULL;
5492 unsigned int fw_size = 0;
5494 /* This is the firmware whose headers the driver was compiled
5497 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5498 if (fw_info == NULL) {
5499 dev_err(adap->pdev_dev,
5500 "unable to get firmware info for chip %d.\n",
5501 CHELSIO_CHIP_VERSION(adap->params.chip));
5505 /* allocate memory to read the header of the firmware on the
5508 card_fw = t4_alloc_mem(sizeof(*card_fw));
5510 /* Get FW from from /lib/firmware/ */
5511 ret = request_firmware(&fw, fw_info->fw_mod_name,
5514 dev_err(adap->pdev_dev,
5515 "unable to load firmware image %s, error %d\n",
5516 fw_info->fw_mod_name, ret);
5522 /* upgrade FW logic */
5523 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5528 release_firmware(fw);
5529 t4_free_mem(card_fw);
5536 * Grab VPD parameters. This should be done after we establish a
5537 * connection to the firmware since some of the VPD parameters
5538 * (notably the Core Clock frequency) are retrieved via requests to
5539 * the firmware. On the other hand, we need these fairly early on
5540 * so we do this right after getting ahold of the firmware.
5542 ret = get_vpd_params(adap, &adap->params.vpd);
5547 * Find out what ports are available to us. Note that we need to do
5548 * this before calling adap_init0_no_config() since it needs nports
5552 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5553 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5554 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5558 adap->params.nports = hweight32(port_vec);
5559 adap->params.portvec = port_vec;
5562 * If the firmware is initialized already (and we're not forcing a
5563 * master initialization), note that we're living with existing
5564 * adapter parameters. Otherwise, it's time to try initializing the
5567 if (state == DEV_STATE_INIT) {
5568 dev_info(adap->pdev_dev, "Coming up as %s: "\
5569 "Adapter already initialized\n",
5570 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5571 adap->flags |= USING_SOFT_PARAMS;
5573 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5574 "Initializing adapter\n");
5577 * If the firmware doesn't support Configuration
5578 * Files warn user and exit,
5581 dev_warn(adap->pdev_dev, "Firmware doesn't support "
5582 "configuration file.\n");
5584 ret = adap_init0_no_config(adap, reset);
5587 * Find out whether we're dealing with a version of
5588 * the firmware which has configuration file support.
5590 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5591 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5592 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5596 * If the firmware doesn't support Configuration
5597 * Files, use the old Driver-based, hard-wired
5598 * initialization. Otherwise, try using the
5599 * Configuration File support and fall back to the
5600 * Driver-based initialization if there's no
5601 * Configuration File found.
5604 ret = adap_init0_no_config(adap, reset);
5607 * The firmware provides us with a memory
5608 * buffer where we can load a Configuration
5609 * File from the host if we want to override
5610 * the Configuration File in flash.
5613 ret = adap_init0_config(adap, reset);
5614 if (ret == -ENOENT) {
5615 dev_info(adap->pdev_dev,
5616 "No Configuration File present "
5617 "on adapter. Using hard-wired "
5618 "configuration parameters.\n");
5619 ret = adap_init0_no_config(adap, reset);
5624 dev_err(adap->pdev_dev,
5625 "could not initialize adapter, error %d\n",
5632 * If we're living with non-hard-coded parameters (either from a
5633 * Firmware Configuration File or values programmed by a different PF
5634 * Driver), give the SGE code a chance to pull in anything that it
5635 * needs ... Note that this must be called after we retrieve our VPD
5636 * parameters in order to know how to convert core ticks to seconds.
5638 if (adap->flags & USING_SOFT_PARAMS) {
5639 ret = t4_sge_init(adap);
5644 if (is_bypass_device(adap->pdev->device))
5645 adap->params.bypass = 1;
5648 * Grab some of our basic fundamental operating parameters.
5650 #define FW_PARAM_DEV(param) \
5651 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
5652 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
5654 #define FW_PARAM_PFVF(param) \
5655 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5656 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
5657 FW_PARAMS_PARAM_Y(0) | \
5658 FW_PARAMS_PARAM_Z(0)
5660 params[0] = FW_PARAM_PFVF(EQ_START);
5661 params[1] = FW_PARAM_PFVF(L2T_START);
5662 params[2] = FW_PARAM_PFVF(L2T_END);
5663 params[3] = FW_PARAM_PFVF(FILTER_START);
5664 params[4] = FW_PARAM_PFVF(FILTER_END);
5665 params[5] = FW_PARAM_PFVF(IQFLINT_START);
5666 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5669 adap->sge.egr_start = val[0];
5670 adap->l2t_start = val[1];
5671 adap->l2t_end = val[2];
5672 adap->tids.ftid_base = val[3];
5673 adap->tids.nftids = val[4] - val[3] + 1;
5674 adap->sge.ingr_start = val[5];
5676 /* query params related to active filter region */
5677 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5678 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5679 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5680 /* If Active filter size is set we enable establishing
5681 * offload connection through firmware work request
5683 if ((val[0] != val[1]) && (ret >= 0)) {
5684 adap->flags |= FW_OFLD_CONN;
5685 adap->tids.aftid_base = val[0];
5686 adap->tids.aftid_end = val[1];
5689 /* If we're running on newer firmware, let it know that we're
5690 * prepared to deal with encapsulated CPL messages. Older
5691 * firmware won't understand this and we'll just get
5692 * unencapsulated messages ...
5694 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5696 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5699 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5700 * capability. Earlier versions of the firmware didn't have the
5701 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5702 * permission to use ULPTX MEMWRITE DSGL.
5704 if (is_t4(adap->params.chip)) {
5705 adap->params.ulptx_memwrite_dsgl = false;
5707 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5708 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5710 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5714 * Get device capabilities so we can determine what resources we need
5717 memset(&caps_cmd, 0, sizeof(caps_cmd));
5718 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5719 FW_CMD_REQUEST | FW_CMD_READ);
5720 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5721 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5726 if (caps_cmd.ofldcaps) {
5727 /* query offload-related parameters */
5728 params[0] = FW_PARAM_DEV(NTID);
5729 params[1] = FW_PARAM_PFVF(SERVER_START);
5730 params[2] = FW_PARAM_PFVF(SERVER_END);
5731 params[3] = FW_PARAM_PFVF(TDDP_START);
5732 params[4] = FW_PARAM_PFVF(TDDP_END);
5733 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5734 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5738 adap->tids.ntids = val[0];
5739 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5740 adap->tids.stid_base = val[1];
5741 adap->tids.nstids = val[2] - val[1] + 1;
5743 * Setup server filter region. Divide the availble filter
5744 * region into two parts. Regular filters get 1/3rd and server
5745 * filters get 2/3rd part. This is only enabled if workarond
5747 * 1. For regular filters.
5748 * 2. Server filter: This are special filters which are used
5749 * to redirect SYN packets to offload queue.
5751 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5752 adap->tids.sftid_base = adap->tids.ftid_base +
5753 DIV_ROUND_UP(adap->tids.nftids, 3);
5754 adap->tids.nsftids = adap->tids.nftids -
5755 DIV_ROUND_UP(adap->tids.nftids, 3);
5756 adap->tids.nftids = adap->tids.sftid_base -
5757 adap->tids.ftid_base;
5759 adap->vres.ddp.start = val[3];
5760 adap->vres.ddp.size = val[4] - val[3] + 1;
5761 adap->params.ofldq_wr_cred = val[5];
5763 adap->params.offload = 1;
5765 if (caps_cmd.rdmacaps) {
5766 params[0] = FW_PARAM_PFVF(STAG_START);
5767 params[1] = FW_PARAM_PFVF(STAG_END);
5768 params[2] = FW_PARAM_PFVF(RQ_START);
5769 params[3] = FW_PARAM_PFVF(RQ_END);
5770 params[4] = FW_PARAM_PFVF(PBL_START);
5771 params[5] = FW_PARAM_PFVF(PBL_END);
5772 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5776 adap->vres.stag.start = val[0];
5777 adap->vres.stag.size = val[1] - val[0] + 1;
5778 adap->vres.rq.start = val[2];
5779 adap->vres.rq.size = val[3] - val[2] + 1;
5780 adap->vres.pbl.start = val[4];
5781 adap->vres.pbl.size = val[5] - val[4] + 1;
5783 params[0] = FW_PARAM_PFVF(SQRQ_START);
5784 params[1] = FW_PARAM_PFVF(SQRQ_END);
5785 params[2] = FW_PARAM_PFVF(CQ_START);
5786 params[3] = FW_PARAM_PFVF(CQ_END);
5787 params[4] = FW_PARAM_PFVF(OCQ_START);
5788 params[5] = FW_PARAM_PFVF(OCQ_END);
5789 ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
5792 adap->vres.qp.start = val[0];
5793 adap->vres.qp.size = val[1] - val[0] + 1;
5794 adap->vres.cq.start = val[2];
5795 adap->vres.cq.size = val[3] - val[2] + 1;
5796 adap->vres.ocq.start = val[4];
5797 adap->vres.ocq.size = val[5] - val[4] + 1;
5799 if (caps_cmd.iscsicaps) {
5800 params[0] = FW_PARAM_PFVF(ISCSI_START);
5801 params[1] = FW_PARAM_PFVF(ISCSI_END);
5802 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5806 adap->vres.iscsi.start = val[0];
5807 adap->vres.iscsi.size = val[1] - val[0] + 1;
5809 #undef FW_PARAM_PFVF
5812 /* The MTU/MSS Table is initialized by now, so load their values. If
5813 * we're initializing the adapter, then we'll make any modifications
5814 * we want to the MTU/MSS Table and also initialize the congestion
5817 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5818 if (state != DEV_STATE_INIT) {
5821 /* The default MTU Table contains values 1492 and 1500.
5822 * However, for TCP, it's better to have two values which are
5823 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5824 * This allows us to have a TCP Data Payload which is a
5825 * multiple of 8 regardless of what combination of TCP Options
5826 * are in use (always a multiple of 4 bytes) which is
5827 * important for performance reasons. For instance, if no
5828 * options are in use, then we have a 20-byte IP header and a
5829 * 20-byte TCP header. In this case, a 1500-byte MSS would
5830 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5831 * which is not a multiple of 8. So using an MSS of 1488 in
5832 * this case results in a TCP Data Payload of 1448 bytes which
5833 * is a multiple of 8. On the other hand, if 12-byte TCP Time
5834 * Stamps have been negotiated, then an MTU of 1500 bytes
5835 * results in a TCP Data Payload of 1448 bytes which, as
5836 * above, is a multiple of 8 bytes ...
5838 for (i = 0; i < NMTUS; i++)
5839 if (adap->params.mtus[i] == 1492) {
5840 adap->params.mtus[i] = 1488;
5844 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5845 adap->params.b_wnd);
5847 t4_init_tp_params(adap);
5848 adap->flags |= FW_OK;
5852 * Something bad happened. If a command timed out or failed with EIO
5853 * FW does not operate within its spec or something catastrophic
5854 * happened to HW/FW, stop issuing commands.
5857 if (ret != -ETIMEDOUT && ret != -EIO)
5858 t4_fw_bye(adap, adap->mbox);
5864 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5865 pci_channel_state_t state)
5868 struct adapter *adap = pci_get_drvdata(pdev);
5874 adap->flags &= ~FW_OK;
5875 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5876 spin_lock(&adap->stats_lock);
5877 for_each_port(adap, i) {
5878 struct net_device *dev = adap->port[i];
5880 netif_device_detach(dev);
5881 netif_carrier_off(dev);
5883 spin_unlock(&adap->stats_lock);
5884 if (adap->flags & FULL_INIT_DONE)
5887 if ((adap->flags & DEV_ENABLED)) {
5888 pci_disable_device(pdev);
5889 adap->flags &= ~DEV_ENABLED;
5891 out: return state == pci_channel_io_perm_failure ?
5892 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5895 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5898 struct fw_caps_config_cmd c;
5899 struct adapter *adap = pci_get_drvdata(pdev);
5902 pci_restore_state(pdev);
5903 pci_save_state(pdev);
5904 return PCI_ERS_RESULT_RECOVERED;
5907 if (!(adap->flags & DEV_ENABLED)) {
5908 if (pci_enable_device(pdev)) {
5909 dev_err(&pdev->dev, "Cannot reenable PCI "
5910 "device after reset\n");
5911 return PCI_ERS_RESULT_DISCONNECT;
5913 adap->flags |= DEV_ENABLED;
5916 pci_set_master(pdev);
5917 pci_restore_state(pdev);
5918 pci_save_state(pdev);
5919 pci_cleanup_aer_uncorrect_error_status(pdev);
5921 if (t4_wait_dev_ready(adap) < 0)
5922 return PCI_ERS_RESULT_DISCONNECT;
5923 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
5924 return PCI_ERS_RESULT_DISCONNECT;
5925 adap->flags |= FW_OK;
5926 if (adap_init1(adap, &c))
5927 return PCI_ERS_RESULT_DISCONNECT;
5929 for_each_port(adap, i) {
5930 struct port_info *p = adap2pinfo(adap, i);
5932 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
5935 return PCI_ERS_RESULT_DISCONNECT;
5937 p->xact_addr_filt = -1;
5940 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5941 adap->params.b_wnd);
5944 return PCI_ERS_RESULT_DISCONNECT;
5945 return PCI_ERS_RESULT_RECOVERED;
5948 static void eeh_resume(struct pci_dev *pdev)
5951 struct adapter *adap = pci_get_drvdata(pdev);
5957 for_each_port(adap, i) {
5958 struct net_device *dev = adap->port[i];
5960 if (netif_running(dev)) {
5962 cxgb_set_rxmode(dev);
5964 netif_device_attach(dev);
5969 static const struct pci_error_handlers cxgb4_eeh = {
5970 .error_detected = eeh_err_detected,
5971 .slot_reset = eeh_slot_reset,
5972 .resume = eeh_resume,
5975 static inline bool is_x_10g_port(const struct link_config *lc)
5977 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
5978 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
5981 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
5982 unsigned int us, unsigned int cnt,
5983 unsigned int size, unsigned int iqe_size)
5986 set_rspq_intr_params(q, us, cnt);
5987 q->iqe_len = iqe_size;
5992 * Perform default configuration of DMA queues depending on the number and type
5993 * of ports we found and the number of available CPUs. Most settings can be
5994 * modified by the admin prior to actual use.
5996 static void cfg_queues(struct adapter *adap)
5998 struct sge *s = &adap->sge;
5999 int i, n10g = 0, qidx = 0;
6000 #ifndef CONFIG_CHELSIO_T4_DCB
6005 for_each_port(adap, i)
6006 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
6007 #ifdef CONFIG_CHELSIO_T4_DCB
6008 /* For Data Center Bridging support we need to be able to support up
6009 * to 8 Traffic Priorities; each of which will be assigned to its
6010 * own TX Queue in order to prevent Head-Of-Line Blocking.
6012 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
6013 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
6014 MAX_ETH_QSETS, adap->params.nports * 8);
6018 for_each_port(adap, i) {
6019 struct port_info *pi = adap2pinfo(adap, i);
6021 pi->first_qset = qidx;
6025 #else /* !CONFIG_CHELSIO_T4_DCB */
6027 * We default to 1 queue per non-10G port and up to # of cores queues
6031 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
6032 if (q10g > netif_get_num_default_rss_queues())
6033 q10g = netif_get_num_default_rss_queues();
6035 for_each_port(adap, i) {
6036 struct port_info *pi = adap2pinfo(adap, i);
6038 pi->first_qset = qidx;
6039 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
6042 #endif /* !CONFIG_CHELSIO_T4_DCB */
6045 s->max_ethqsets = qidx; /* MSI-X may lower it later */
6047 if (is_offload(adap)) {
6049 * For offload we use 1 queue/channel if all ports are up to 1G,
6050 * otherwise we divide all available queues amongst the channels
6051 * capped by the number of available cores.
6054 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
6056 s->ofldqsets = roundup(i, adap->params.nports);
6058 s->ofldqsets = adap->params.nports;
6059 /* For RDMA one Rx queue per channel suffices */
6060 s->rdmaqs = adap->params.nports;
6061 s->rdmaciqs = adap->params.nports;
6064 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
6065 struct sge_eth_rxq *r = &s->ethrxq[i];
6067 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
6071 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
6072 s->ethtxq[i].q.size = 1024;
6074 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
6075 s->ctrlq[i].q.size = 512;
6077 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
6078 s->ofldtxq[i].q.size = 1024;
6080 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
6081 struct sge_ofld_rxq *r = &s->ofldrxq[i];
6083 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
6084 r->rspq.uld = CXGB4_ULD_ISCSI;
6088 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
6089 struct sge_ofld_rxq *r = &s->rdmarxq[i];
6091 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
6092 r->rspq.uld = CXGB4_ULD_RDMA;
6096 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
6097 if (ciq_size > SGE_MAX_IQ_SIZE) {
6098 CH_WARN(adap, "CIQ size too small for available IQs\n");
6099 ciq_size = SGE_MAX_IQ_SIZE;
6102 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
6103 struct sge_ofld_rxq *r = &s->rdmaciq[i];
6105 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
6106 r->rspq.uld = CXGB4_ULD_RDMA;
6109 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
6110 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
6114 * Reduce the number of Ethernet queues across all ports to at most n.
6115 * n provides at least one queue per port.
6117 static void reduce_ethqs(struct adapter *adap, int n)
6120 struct port_info *pi;
6122 while (n < adap->sge.ethqsets)
6123 for_each_port(adap, i) {
6124 pi = adap2pinfo(adap, i);
6125 if (pi->nqsets > 1) {
6127 adap->sge.ethqsets--;
6128 if (adap->sge.ethqsets <= n)
6134 for_each_port(adap, i) {
6135 pi = adap2pinfo(adap, i);
6141 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
6142 #define EXTRA_VECS 2
6144 static int enable_msix(struct adapter *adap)
6148 struct sge *s = &adap->sge;
6149 unsigned int nchan = adap->params.nports;
6150 struct msix_entry entries[MAX_INGQ + 1];
6152 for (i = 0; i < ARRAY_SIZE(entries); ++i)
6153 entries[i].entry = i;
6155 want = s->max_ethqsets + EXTRA_VECS;
6156 if (is_offload(adap)) {
6157 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
6158 /* need nchan for each possible ULD */
6159 ofld_need = 3 * nchan;
6161 #ifdef CONFIG_CHELSIO_T4_DCB
6162 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
6165 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
6167 need = adap->params.nports + EXTRA_VECS + ofld_need;
6169 want = pci_enable_msix_range(adap->pdev, entries, need, want);
6174 * Distribute available vectors to the various queue groups.
6175 * Every group gets its minimum requirement and NIC gets top
6176 * priority for leftovers.
6178 i = want - EXTRA_VECS - ofld_need;
6179 if (i < s->max_ethqsets) {
6180 s->max_ethqsets = i;
6181 if (i < s->ethqsets)
6182 reduce_ethqs(adap, i);
6184 if (is_offload(adap)) {
6185 i = want - EXTRA_VECS - s->max_ethqsets;
6186 i -= ofld_need - nchan;
6187 s->ofldqsets = (i / nchan) * nchan; /* round down */
6189 for (i = 0; i < want; ++i)
6190 adap->msix_info[i].vec = entries[i].vector;
6197 static int init_rss(struct adapter *adap)
6201 for_each_port(adap, i) {
6202 struct port_info *pi = adap2pinfo(adap, i);
6204 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6207 for (j = 0; j < pi->rss_size; j++)
6208 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
6213 static void print_port_info(const struct net_device *dev)
6217 const char *spd = "";
6218 const struct port_info *pi = netdev_priv(dev);
6219 const struct adapter *adap = pi->adapter;
6221 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
6223 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
6225 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
6228 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
6229 bufp += sprintf(bufp, "100/");
6230 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
6231 bufp += sprintf(bufp, "1000/");
6232 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
6233 bufp += sprintf(bufp, "10G/");
6234 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
6235 bufp += sprintf(bufp, "40G/");
6238 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6240 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
6241 adap->params.vpd.id,
6242 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
6243 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
6244 (adap->flags & USING_MSIX) ? " MSI-X" :
6245 (adap->flags & USING_MSI) ? " MSI" : "");
6246 netdev_info(dev, "S/N: %s, P/N: %s\n",
6247 adap->params.vpd.sn, adap->params.vpd.pn);
6250 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
6252 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
6256 * Free the following resources:
6257 * - memory used for tables
6260 * - resources FW is holding for us
6262 static void free_some_resources(struct adapter *adapter)
6266 t4_free_mem(adapter->l2t);
6267 t4_free_mem(adapter->tids.tid_tab);
6268 disable_msi(adapter);
6270 for_each_port(adapter, i)
6271 if (adapter->port[i]) {
6272 kfree(adap2pinfo(adapter, i)->rss);
6273 free_netdev(adapter->port[i]);
6275 if (adapter->flags & FW_OK)
6276 t4_fw_bye(adapter, adapter->fn);
6279 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
6280 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
6281 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6282 #define SEGMENT_SIZE 128
6284 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6286 int func, i, err, s_qpp, qpp, num_seg;
6287 struct port_info *pi;
6288 bool highdma = false;
6289 struct adapter *adapter = NULL;
6291 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
6293 err = pci_request_regions(pdev, KBUILD_MODNAME);
6295 /* Just info, some other driver may have claimed the device. */
6296 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6300 /* We control everything through one PF */
6301 func = PCI_FUNC(pdev->devfn);
6302 if (func != ent->driver_data) {
6303 pci_save_state(pdev); /* to restore SR-IOV later */
6307 err = pci_enable_device(pdev);
6309 dev_err(&pdev->dev, "cannot enable PCI device\n");
6310 goto out_release_regions;
6313 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6315 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6317 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6318 "coherent allocations\n");
6319 goto out_disable_device;
6322 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6324 dev_err(&pdev->dev, "no usable DMA configuration\n");
6325 goto out_disable_device;
6329 pci_enable_pcie_error_reporting(pdev);
6330 enable_pcie_relaxed_ordering(pdev);
6331 pci_set_master(pdev);
6332 pci_save_state(pdev);
6334 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6337 goto out_disable_device;
6340 /* PCI device has been enabled */
6341 adapter->flags |= DEV_ENABLED;
6343 adapter->regs = pci_ioremap_bar(pdev, 0);
6344 if (!adapter->regs) {
6345 dev_err(&pdev->dev, "cannot map device registers\n");
6347 goto out_free_adapter;
6350 adapter->pdev = pdev;
6351 adapter->pdev_dev = &pdev->dev;
6352 adapter->mbox = func;
6354 adapter->msg_enable = dflt_msg_enable;
6355 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6357 spin_lock_init(&adapter->stats_lock);
6358 spin_lock_init(&adapter->tid_release_lock);
6360 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6361 INIT_WORK(&adapter->db_full_task, process_db_full);
6362 INIT_WORK(&adapter->db_drop_task, process_db_drop);
6364 err = t4_prep_adapter(adapter);
6366 goto out_unmap_bar0;
6368 if (!is_t4(adapter->params.chip)) {
6369 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
6370 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
6371 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
6372 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6374 /* Each segment size is 128B. Write coalescing is enabled only
6375 * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
6376 * queue is less no of segments that can be accommodated in
6379 if (qpp > num_seg) {
6381 "Incorrect number of egress queues per page\n");
6383 goto out_unmap_bar0;
6385 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6386 pci_resource_len(pdev, 2));
6387 if (!adapter->bar2) {
6388 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6390 goto out_unmap_bar0;
6394 setup_memwin(adapter);
6395 err = adap_init0(adapter);
6396 setup_memwin_rdma(adapter);
6400 for_each_port(adapter, i) {
6401 struct net_device *netdev;
6403 netdev = alloc_etherdev_mq(sizeof(struct port_info),
6410 SET_NETDEV_DEV(netdev, &pdev->dev);
6412 adapter->port[i] = netdev;
6413 pi = netdev_priv(netdev);
6414 pi->adapter = adapter;
6415 pi->xact_addr_filt = -1;
6417 netdev->irq = pdev->irq;
6419 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6420 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6421 NETIF_F_RXCSUM | NETIF_F_RXHASH |
6422 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
6424 netdev->hw_features |= NETIF_F_HIGHDMA;
6425 netdev->features |= netdev->hw_features;
6426 netdev->vlan_features = netdev->features & VLAN_FEAT;
6428 netdev->priv_flags |= IFF_UNICAST_FLT;
6430 netdev->netdev_ops = &cxgb4_netdev_ops;
6431 #ifdef CONFIG_CHELSIO_T4_DCB
6432 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6433 cxgb4_dcb_state_init(netdev);
6435 netdev->ethtool_ops = &cxgb_ethtool_ops;
6438 pci_set_drvdata(pdev, adapter);
6440 if (adapter->flags & FW_OK) {
6441 err = t4_port_init(adapter, func, func, 0);
6447 * Configure queues and allocate tables now, they can be needed as
6448 * soon as the first register_netdev completes.
6450 cfg_queues(adapter);
6452 adapter->l2t = t4_init_l2t();
6453 if (!adapter->l2t) {
6454 /* We tolerate a lack of L2T, giving up some functionality */
6455 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6456 adapter->params.offload = 0;
6459 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6460 dev_warn(&pdev->dev, "could not allocate TID table, "
6462 adapter->params.offload = 0;
6465 /* See what interrupts we'll be using */
6466 if (msi > 1 && enable_msix(adapter) == 0)
6467 adapter->flags |= USING_MSIX;
6468 else if (msi > 0 && pci_enable_msi(pdev) == 0)
6469 adapter->flags |= USING_MSI;
6471 err = init_rss(adapter);
6476 * The card is now ready to go. If any errors occur during device
6477 * registration we do not fail the whole card but rather proceed only
6478 * with the ports we manage to register successfully. However we must
6479 * register at least one net device.
6481 for_each_port(adapter, i) {
6482 pi = adap2pinfo(adapter, i);
6483 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6484 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6486 err = register_netdev(adapter->port[i]);
6489 adapter->chan_map[pi->tx_chan] = i;
6490 print_port_info(adapter->port[i]);
6493 dev_err(&pdev->dev, "could not register any net devices\n");
6497 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6501 if (cxgb4_debugfs_root) {
6502 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6503 cxgb4_debugfs_root);
6504 setup_debugfs(adapter);
6507 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6508 pdev->needs_freset = 1;
6510 if (is_offload(adapter))
6511 attach_ulds(adapter);
6514 #ifdef CONFIG_PCI_IOV
6515 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
6516 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6517 dev_info(&pdev->dev,
6518 "instantiated %u virtual functions\n",
6524 free_some_resources(adapter);
6526 if (!is_t4(adapter->params.chip))
6527 iounmap(adapter->bar2);
6529 iounmap(adapter->regs);
6533 pci_disable_pcie_error_reporting(pdev);
6534 pci_disable_device(pdev);
6535 out_release_regions:
6536 pci_release_regions(pdev);
6540 static void remove_one(struct pci_dev *pdev)
6542 struct adapter *adapter = pci_get_drvdata(pdev);
6544 #ifdef CONFIG_PCI_IOV
6545 pci_disable_sriov(pdev);
6552 if (is_offload(adapter))
6553 detach_ulds(adapter);
6555 for_each_port(adapter, i)
6556 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6557 unregister_netdev(adapter->port[i]);
6559 if (adapter->debugfs_root)
6560 debugfs_remove_recursive(adapter->debugfs_root);
6562 /* If we allocated filters, free up state associated with any
6565 if (adapter->tids.ftid_tab) {
6566 struct filter_entry *f = &adapter->tids.ftid_tab[0];
6567 for (i = 0; i < (adapter->tids.nftids +
6568 adapter->tids.nsftids); i++, f++)
6570 clear_filter(adapter, f);
6573 if (adapter->flags & FULL_INIT_DONE)
6576 free_some_resources(adapter);
6577 iounmap(adapter->regs);
6578 if (!is_t4(adapter->params.chip))
6579 iounmap(adapter->bar2);
6580 pci_disable_pcie_error_reporting(pdev);
6581 if ((adapter->flags & DEV_ENABLED)) {
6582 pci_disable_device(pdev);
6583 adapter->flags &= ~DEV_ENABLED;
6585 pci_release_regions(pdev);
6588 pci_release_regions(pdev);
6591 static struct pci_driver cxgb4_driver = {
6592 .name = KBUILD_MODNAME,
6593 .id_table = cxgb4_pci_tbl,
6595 .remove = remove_one,
6596 .shutdown = remove_one,
6597 .err_handler = &cxgb4_eeh,
6600 static int __init cxgb4_init_module(void)
6604 workq = create_singlethread_workqueue("cxgb4");
6608 /* Debugfs support is optional, just warn if this fails */
6609 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6610 if (!cxgb4_debugfs_root)
6611 pr_warn("could not create debugfs entry, continuing\n");
6613 ret = pci_register_driver(&cxgb4_driver);
6615 debugfs_remove(cxgb4_debugfs_root);
6616 destroy_workqueue(workq);
6619 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6624 static void __exit cxgb4_cleanup_module(void)
6626 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6627 pci_unregister_driver(&cxgb4_driver);
6628 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6629 flush_workqueue(workq);
6630 destroy_workqueue(workq);
6633 module_init(cxgb4_init_module);
6634 module_exit(cxgb4_cleanup_module);