1 /*************************************************************************
2 * myri10ge.c: Myricom Myri-10G Ethernet driver.
4 * Copyright (C) 2005 - 2009 Myricom, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of Myricom, Inc. nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
32 * If the eeprom on your board is not recent enough, you will need to get a
33 * newer firmware image at:
34 * http://www.myri.com/scs/download-Myri10GE.html
36 * Contact Information:
38 * Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
39 *************************************************************************/
41 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
43 #include <linux/tcp.h>
44 #include <linux/netdevice.h>
45 #include <linux/skbuff.h>
46 #include <linux/string.h>
47 #include <linux/module.h>
48 #include <linux/pci.h>
49 #include <linux/dma-mapping.h>
50 #include <linux/etherdevice.h>
51 #include <linux/if_ether.h>
52 #include <linux/if_vlan.h>
53 #include <linux/inet_lro.h>
54 #include <linux/dca.h>
56 #include <linux/inet.h>
58 #include <linux/ethtool.h>
59 #include <linux/firmware.h>
60 #include <linux/delay.h>
61 #include <linux/timer.h>
62 #include <linux/vmalloc.h>
63 #include <linux/crc32.h>
64 #include <linux/moduleparam.h>
66 #include <linux/log2.h>
67 #include <linux/slab.h>
68 #include <linux/prefetch.h>
69 #include <net/checksum.h>
72 #include <asm/byteorder.h>
74 #include <asm/processor.h>
79 #include "myri10ge_mcp.h"
80 #include "myri10ge_mcp_gen_header.h"
82 #define MYRI10GE_VERSION_STR "1.5.2-1.459"
84 MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
85 MODULE_AUTHOR("Maintainer: help@myri.com");
86 MODULE_VERSION(MYRI10GE_VERSION_STR);
87 MODULE_LICENSE("Dual BSD/GPL");
89 #define MYRI10GE_MAX_ETHER_MTU 9014
91 #define MYRI10GE_ETH_STOPPED 0
92 #define MYRI10GE_ETH_STOPPING 1
93 #define MYRI10GE_ETH_STARTING 2
94 #define MYRI10GE_ETH_RUNNING 3
95 #define MYRI10GE_ETH_OPEN_FAILED 4
97 #define MYRI10GE_EEPROM_STRINGS_SIZE 256
98 #define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
99 #define MYRI10GE_MAX_LRO_DESCRIPTORS 8
100 #define MYRI10GE_LRO_MAX_PKTS 64
102 #define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
103 #define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
105 #define MYRI10GE_ALLOC_ORDER 0
106 #define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
107 #define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)
109 #define MYRI10GE_MAX_SLICES 32
111 struct myri10ge_rx_buffer_state {
114 DEFINE_DMA_UNMAP_ADDR(bus);
115 DEFINE_DMA_UNMAP_LEN(len);
118 struct myri10ge_tx_buffer_state {
121 DEFINE_DMA_UNMAP_ADDR(bus);
122 DEFINE_DMA_UNMAP_LEN(len);
125 struct myri10ge_cmd {
131 struct myri10ge_rx_buf {
132 struct mcp_kreq_ether_recv __iomem *lanai; /* lanai ptr for recv ring */
133 struct mcp_kreq_ether_recv *shadow; /* host shadow of recv ring */
134 struct myri10ge_rx_buffer_state *info;
141 int mask; /* number of rx slots -1 */
145 struct myri10ge_tx_buf {
146 struct mcp_kreq_ether_send __iomem *lanai; /* lanai ptr for sendq */
147 __be32 __iomem *send_go; /* "go" doorbell ptr */
148 __be32 __iomem *send_stop; /* "stop" doorbell ptr */
149 struct mcp_kreq_ether_send *req_list; /* host shadow of sendq */
151 struct myri10ge_tx_buffer_state *info;
152 int mask; /* number of transmit slots -1 */
153 int req ____cacheline_aligned; /* transmit slots submitted */
154 int pkt_start; /* packets started */
157 int done ____cacheline_aligned; /* transmit slots completed */
158 int pkt_done; /* packets completed */
163 struct myri10ge_rx_done {
164 struct mcp_slot *entry;
168 struct net_lro_mgr lro_mgr;
169 struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS];
172 struct myri10ge_slice_netstats {
173 unsigned long rx_packets;
174 unsigned long tx_packets;
175 unsigned long rx_bytes;
176 unsigned long tx_bytes;
177 unsigned long rx_dropped;
178 unsigned long tx_dropped;
181 struct myri10ge_slice_state {
182 struct myri10ge_tx_buf tx; /* transmit ring */
183 struct myri10ge_rx_buf rx_small;
184 struct myri10ge_rx_buf rx_big;
185 struct myri10ge_rx_done rx_done;
186 struct net_device *dev;
187 struct napi_struct napi;
188 struct myri10ge_priv *mgp;
189 struct myri10ge_slice_netstats stats;
190 __be32 __iomem *irq_claim;
191 struct mcp_irq_data *fw_stats;
192 dma_addr_t fw_stats_bus;
193 int watchdog_tx_done;
195 int watchdog_rx_done;
196 #ifdef CONFIG_MYRI10GE_DCA
199 __be32 __iomem *dca_tag;
204 struct myri10ge_priv {
205 struct myri10ge_slice_state *ss;
206 int tx_boundary; /* boundary transmits cannot cross */
208 int running; /* running? */
212 struct net_device *dev;
215 unsigned long board_span;
216 unsigned long iomem_base;
217 __be32 __iomem *irq_deassert;
218 char *mac_addr_string;
219 struct mcp_cmd_response *cmd;
221 struct pci_dev *pdev;
224 struct msix_entry *msix_vectors;
225 #ifdef CONFIG_MYRI10GE_DCA
230 unsigned int rdma_tags_available;
232 __be32 __iomem *intr_coal_delay_ptr;
236 wait_queue_head_t down_wq;
237 struct work_struct watchdog_work;
238 struct timer_list watchdog_timer;
242 bool fw_name_allocated;
244 char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
245 char *product_code_string;
246 char fw_version[128];
250 int adopted_rx_filter_bug;
251 u8 mac_addr[6]; /* eeprom mac address */
252 unsigned long serial_number;
253 int vendor_specific_offset;
254 int fw_multicast_support;
262 unsigned int board_number;
266 static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
267 static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
268 static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
269 static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
270 MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
271 MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
272 MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
273 MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");
275 /* Careful: must be accessed under kparam_block_sysfs_write */
276 static char *myri10ge_fw_name = NULL;
277 module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
278 MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");
280 #define MYRI10GE_MAX_BOARDS 8
281 static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
282 {[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
283 module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
285 MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image names per board");
287 static int myri10ge_ecrc_enable = 1;
288 module_param(myri10ge_ecrc_enable, int, S_IRUGO);
289 MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");
291 static int myri10ge_small_bytes = -1; /* -1 == auto */
292 module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
293 MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");
295 static int myri10ge_msi = 1; /* enable msi by default */
296 module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
297 MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");
299 static int myri10ge_intr_coal_delay = 75;
300 module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
301 MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");
303 static int myri10ge_flow_control = 1;
304 module_param(myri10ge_flow_control, int, S_IRUGO);
305 MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");
307 static int myri10ge_deassert_wait = 1;
308 module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
309 MODULE_PARM_DESC(myri10ge_deassert_wait,
310 "Wait when deasserting legacy interrupts");
312 static int myri10ge_force_firmware = 0;
313 module_param(myri10ge_force_firmware, int, S_IRUGO);
314 MODULE_PARM_DESC(myri10ge_force_firmware,
315 "Force firmware to assume aligned completions");
317 static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
318 module_param(myri10ge_initial_mtu, int, S_IRUGO);
319 MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");
321 static int myri10ge_napi_weight = 64;
322 module_param(myri10ge_napi_weight, int, S_IRUGO);
323 MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");
325 static int myri10ge_watchdog_timeout = 1;
326 module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
327 MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");
329 static int myri10ge_max_irq_loops = 1048576;
330 module_param(myri10ge_max_irq_loops, int, S_IRUGO);
331 MODULE_PARM_DESC(myri10ge_max_irq_loops,
332 "Set stuck legacy IRQ detection threshold");
334 #define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK
336 static int myri10ge_debug = -1; /* defaults above */
337 module_param(myri10ge_debug, int, 0);
338 MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
340 static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS;
341 module_param(myri10ge_lro_max_pkts, int, S_IRUGO);
342 MODULE_PARM_DESC(myri10ge_lro_max_pkts,
343 "Number of LRO packets to be aggregated");
345 static int myri10ge_fill_thresh = 256;
346 module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
347 MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");
349 static int myri10ge_reset_recover = 1;
351 static int myri10ge_max_slices = 1;
352 module_param(myri10ge_max_slices, int, S_IRUGO);
353 MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");
355 static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
356 module_param(myri10ge_rss_hash, int, S_IRUGO);
357 MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");
359 static int myri10ge_dca = 1;
360 module_param(myri10ge_dca, int, S_IRUGO);
361 MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");
363 #define MYRI10GE_FW_OFFSET 1024*1024
364 #define MYRI10GE_HIGHPART_TO_U32(X) \
365 (sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
366 #define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))
368 #define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
370 static void myri10ge_set_multicast_list(struct net_device *dev);
371 static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
372 struct net_device *dev);
374 static inline void put_be32(__be32 val, __be32 __iomem * p)
376 __raw_writel((__force __u32) val, (__force void __iomem *)p);
379 static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
380 struct rtnl_link_stats64 *stats);
382 static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
384 if (mgp->fw_name_allocated)
387 mgp->fw_name_allocated = allocated;
391 myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
392 struct myri10ge_cmd *data, int atomic)
395 char buf_bytes[sizeof(*buf) + 8];
396 struct mcp_cmd_response *response = mgp->cmd;
397 char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
398 u32 dma_low, dma_high, result, value;
401 /* ensure buf is aligned to 8 bytes */
402 buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);
404 buf->data0 = htonl(data->data0);
405 buf->data1 = htonl(data->data1);
406 buf->data2 = htonl(data->data2);
407 buf->cmd = htonl(cmd);
408 dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
409 dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
411 buf->response_addr.low = htonl(dma_low);
412 buf->response_addr.high = htonl(dma_high);
413 response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
415 myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));
417 /* wait up to 15ms. Longest command is the DMA benchmark,
418 * which is capped at 5ms, but runs from a timeout handler
419 * that runs every 7.8ms. So a 15ms timeout leaves us with
423 /* if atomic is set, do not sleep,
424 * and try to get the completion quickly
425 * (1ms will be enough for those commands) */
426 for (sleep_total = 0;
427 sleep_total < 1000 &&
428 response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
434 /* use msleep for most command */
435 for (sleep_total = 0;
437 response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
442 result = ntohl(response->result);
443 value = ntohl(response->data);
444 if (result != MYRI10GE_NO_RESPONSE_RESULT) {
448 } else if (result == MXGEFW_CMD_UNKNOWN) {
450 } else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
452 } else if (result == MXGEFW_CMD_ERROR_RANGE &&
453 cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
455 data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) !=
459 dev_err(&mgp->pdev->dev,
460 "command %d failed, result = %d\n",
466 dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
472 * The eeprom strings on the lanaiX have the format
475 * PT:ddd mmm xx xx:xx:xx xx\0
476 * PV:ddd mmm xx xx:xx:xx xx\0
478 static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
483 ptr = mgp->eeprom_strings;
484 limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;
486 while (*ptr != '\0' && ptr < limit) {
487 if (memcmp(ptr, "MAC=", 4) == 0) {
489 mgp->mac_addr_string = ptr;
490 for (i = 0; i < 6; i++) {
491 if ((ptr + 2) > limit)
494 simple_strtoul(ptr, &ptr, 16);
498 if (memcmp(ptr, "PC=", 3) == 0) {
500 mgp->product_code_string = ptr;
502 if (memcmp((const void *)ptr, "SN=", 3) == 0) {
504 mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
506 while (ptr < limit && *ptr++) ;
512 dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
517 * Enable or disable periodic RDMAs from the host to make certain
518 * chipsets resend dropped PCIe messages
521 static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
523 char __iomem *submit;
524 __be32 buf[16] __attribute__ ((__aligned__(8)));
525 u32 dma_low, dma_high;
528 /* clear confirmation addr */
532 /* send a rdma command to the PCIe engine, and wait for the
533 * response in the confirmation address. The firmware should
534 * write a -1 there to indicate it is alive and well
536 dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
537 dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
539 buf[0] = htonl(dma_high); /* confirm addr MSW */
540 buf[1] = htonl(dma_low); /* confirm addr LSW */
541 buf[2] = MYRI10GE_NO_CONFIRM_DATA; /* confirm data */
542 buf[3] = htonl(dma_high); /* dummy addr MSW */
543 buf[4] = htonl(dma_low); /* dummy addr LSW */
544 buf[5] = htonl(enable); /* enable? */
546 submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;
548 myri10ge_pio_copy(submit, &buf, sizeof(buf));
549 for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
551 if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
552 dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
553 (enable ? "enable" : "disable"));
557 myri10ge_validate_firmware(struct myri10ge_priv *mgp,
558 struct mcp_gen_header *hdr)
560 struct device *dev = &mgp->pdev->dev;
562 /* check firmware type */
563 if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
564 dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
568 /* save firmware version for ethtool */
569 strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
571 sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
572 &mgp->fw_ver_minor, &mgp->fw_ver_tiny);
574 if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR &&
575 mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
576 dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
577 dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
578 MXGEFW_VERSION_MINOR);
584 static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
586 unsigned crc, reread_crc;
587 const struct firmware *fw;
588 struct device *dev = &mgp->pdev->dev;
589 unsigned char *fw_readback;
590 struct mcp_gen_header *hdr;
595 if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
596 dev_err(dev, "Unable to load %s firmware image via hotplug\n",
599 goto abort_with_nothing;
604 if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
605 fw->size < MCP_HEADER_PTR_OFFSET + 4) {
606 dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
612 hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
613 if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
614 dev_err(dev, "Bad firmware file\n");
618 hdr = (void *)(fw->data + hdr_offset);
620 status = myri10ge_validate_firmware(mgp, hdr);
624 crc = crc32(~0, fw->data, fw->size);
625 for (i = 0; i < fw->size; i += 256) {
626 myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
628 min(256U, (unsigned)(fw->size - i)));
632 fw_readback = vmalloc(fw->size);
637 /* corruption checking is good for parity recovery and buggy chipset */
638 memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
639 reread_crc = crc32(~0, fw_readback, fw->size);
641 if (crc != reread_crc) {
642 dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
643 (unsigned)fw->size, reread_crc, crc);
647 *size = (u32) fw->size;
650 release_firmware(fw);
656 static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
658 struct mcp_gen_header *hdr;
659 struct device *dev = &mgp->pdev->dev;
660 const size_t bytes = sizeof(struct mcp_gen_header);
664 /* find running firmware header */
665 hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
667 if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
668 dev_err(dev, "Running firmware has bad header offset (%d)\n",
673 /* copy header of running firmware from SRAM to host memory to
674 * validate firmware */
675 hdr = kmalloc(bytes, GFP_KERNEL);
677 dev_err(dev, "could not malloc firmware hdr\n");
680 memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
681 status = myri10ge_validate_firmware(mgp, hdr);
684 /* check to see if adopted firmware has bug where adopting
685 * it will cause broadcasts to be filtered unless the NIC
686 * is kept in ALLMULTI mode */
687 if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
688 mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
689 mgp->adopted_rx_filter_bug = 1;
690 dev_warn(dev, "Adopting fw %d.%d.%d: "
691 "working around rx filter bug\n",
692 mgp->fw_ver_major, mgp->fw_ver_minor,
698 static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
700 struct myri10ge_cmd cmd;
703 /* probe for IPv6 TSO support */
704 mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
705 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
708 mgp->max_tso6 = cmd.data0;
709 mgp->features |= NETIF_F_TSO6;
712 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
714 dev_err(&mgp->pdev->dev,
715 "failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
719 mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));
724 static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
726 char __iomem *submit;
727 __be32 buf[16] __attribute__ ((__aligned__(8)));
728 u32 dma_low, dma_high, size;
732 status = myri10ge_load_hotplug_firmware(mgp, &size);
736 dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");
738 /* Do not attempt to adopt firmware if there
743 status = myri10ge_adopt_running_firmware(mgp);
745 dev_err(&mgp->pdev->dev,
746 "failed to adopt running firmware\n");
749 dev_info(&mgp->pdev->dev,
750 "Successfully adopted running firmware\n");
751 if (mgp->tx_boundary == 4096) {
752 dev_warn(&mgp->pdev->dev,
753 "Using firmware currently running on NIC"
755 dev_warn(&mgp->pdev->dev,
756 "performance consider loading optimized "
758 dev_warn(&mgp->pdev->dev, "via hotplug\n");
761 set_fw_name(mgp, "adopted", false);
762 mgp->tx_boundary = 2048;
763 myri10ge_dummy_rdma(mgp, 1);
764 status = myri10ge_get_firmware_capabilities(mgp);
768 /* clear confirmation addr */
772 /* send a reload command to the bootstrap MCP, and wait for the
773 * response in the confirmation address. The firmware should
774 * write a -1 there to indicate it is alive and well
776 dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
777 dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
779 buf[0] = htonl(dma_high); /* confirm addr MSW */
780 buf[1] = htonl(dma_low); /* confirm addr LSW */
781 buf[2] = MYRI10GE_NO_CONFIRM_DATA; /* confirm data */
783 /* FIX: All newest firmware should un-protect the bottom of
784 * the sram before handoff. However, the very first interfaces
785 * do not. Therefore the handoff copy must skip the first 8 bytes
787 buf[3] = htonl(MYRI10GE_FW_OFFSET + 8); /* where the code starts */
788 buf[4] = htonl(size - 8); /* length of code */
789 buf[5] = htonl(8); /* where to copy to */
790 buf[6] = htonl(0); /* where to jump to */
792 submit = mgp->sram + MXGEFW_BOOT_HANDOFF;
794 myri10ge_pio_copy(submit, &buf, sizeof(buf));
799 while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
803 if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
804 dev_err(&mgp->pdev->dev, "handoff failed\n");
807 myri10ge_dummy_rdma(mgp, 1);
808 status = myri10ge_get_firmware_capabilities(mgp);
813 static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
815 struct myri10ge_cmd cmd;
818 cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
819 | (addr[2] << 8) | addr[3]);
821 cmd.data1 = ((addr[4] << 8) | (addr[5]));
823 status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
827 static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
829 struct myri10ge_cmd cmd;
832 ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
833 status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
836 netdev_err(mgp->dev, "Failed to set flow control mode\n");
844 myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
846 struct myri10ge_cmd cmd;
849 ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
850 status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
852 netdev_err(mgp->dev, "Failed to set promisc mode\n");
855 static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
857 struct myri10ge_cmd cmd;
860 struct page *dmatest_page;
861 dma_addr_t dmatest_bus;
864 dmatest_page = alloc_page(GFP_KERNEL);
867 dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
870 /* Run a small DMA test.
871 * The magic multipliers to the length tell the firmware
872 * to do DMA read, write, or read+write tests. The
873 * results are returned in cmd.data0. The upper 16
874 * bits or the return is the number of transfers completed.
875 * The lower 16 bits is the time in 0.5us ticks that the
876 * transfers took to complete.
879 len = mgp->tx_boundary;
881 cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
882 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
883 cmd.data2 = len * 0x10000;
884 status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
889 mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
890 cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
891 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
892 cmd.data2 = len * 0x1;
893 status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
898 mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
900 cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
901 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
902 cmd.data2 = len * 0x10001;
903 status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
908 mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
909 (cmd.data0 & 0xffff);
912 pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
913 put_page(dmatest_page);
915 if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
916 dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n",
922 static int myri10ge_reset(struct myri10ge_priv *mgp)
924 struct myri10ge_cmd cmd;
925 struct myri10ge_slice_state *ss;
928 #ifdef CONFIG_MYRI10GE_DCA
929 unsigned long dca_tag_off;
932 /* try to send a reset command to the card to see if it
934 memset(&cmd, 0, sizeof(cmd));
935 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
937 dev_err(&mgp->pdev->dev, "failed reset\n");
941 (void)myri10ge_dma_test(mgp, MXGEFW_DMA_TEST);
943 * Use non-ndis mcp_slot (eg, 4 bytes total,
944 * no toeplitz hash value returned. Older firmware will
945 * not understand this command, but will use the correct
946 * sized mcp_slot, so we ignore error returns
948 cmd.data0 = MXGEFW_RSS_MCP_SLOT_TYPE_MIN;
949 (void)myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE, &cmd, 0);
951 /* Now exchange information about interrupts */
953 bytes = mgp->max_intr_slots * sizeof(*mgp->ss[0].rx_done.entry);
954 cmd.data0 = (u32) bytes;
955 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
958 * Even though we already know how many slices are supported
959 * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
960 * has magic side effects, and must be called after a reset.
961 * It must be called prior to calling any RSS related cmds,
962 * including assigning an interrupt queue for anything but
963 * slice 0. It must also be called *after*
964 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
965 * the firmware to compute offsets.
968 if (mgp->num_slices > 1) {
970 /* ask the maximum number of slices it supports */
971 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES,
974 dev_err(&mgp->pdev->dev,
975 "failed to get number of slices\n");
979 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
980 * to setting up the interrupt queue DMA
983 cmd.data0 = mgp->num_slices;
984 cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
985 if (mgp->dev->real_num_tx_queues > 1)
986 cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
987 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
990 /* Firmware older than 1.4.32 only supports multiple
991 * RX queues, so if we get an error, first retry using a
992 * single TX queue before giving up */
993 if (status != 0 && mgp->dev->real_num_tx_queues > 1) {
994 netif_set_real_num_tx_queues(mgp->dev, 1);
995 cmd.data0 = mgp->num_slices;
996 cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
997 status = myri10ge_send_cmd(mgp,
998 MXGEFW_CMD_ENABLE_RSS_QUEUES,
1003 dev_err(&mgp->pdev->dev,
1004 "failed to set number of slices\n");
1009 for (i = 0; i < mgp->num_slices; i++) {
1011 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->rx_done.bus);
1012 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->rx_done.bus);
1014 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA,
1019 myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
1020 for (i = 0; i < mgp->num_slices; i++) {
1023 (__iomem __be32 *) (mgp->sram + cmd.data0 + 8 * i);
1025 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
1027 mgp->irq_deassert = (__iomem __be32 *) (mgp->sram + cmd.data0);
1029 status |= myri10ge_send_cmd
1030 (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
1031 mgp->intr_coal_delay_ptr = (__iomem __be32 *) (mgp->sram + cmd.data0);
1033 dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
1036 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
1038 #ifdef CONFIG_MYRI10GE_DCA
1039 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0);
1040 dca_tag_off = cmd.data0;
1041 for (i = 0; i < mgp->num_slices; i++) {
1044 ss->dca_tag = (__iomem __be32 *)
1045 (mgp->sram + dca_tag_off + 4 * i);
1050 #endif /* CONFIG_MYRI10GE_DCA */
1052 /* reset mcp/driver shared state back to 0 */
1054 mgp->link_changes = 0;
1055 for (i = 0; i < mgp->num_slices; i++) {
1058 memset(ss->rx_done.entry, 0, bytes);
1061 ss->tx.pkt_start = 0;
1062 ss->tx.pkt_done = 0;
1064 ss->rx_small.cnt = 0;
1065 ss->rx_done.idx = 0;
1066 ss->rx_done.cnt = 0;
1067 ss->tx.wake_queue = 0;
1068 ss->tx.stop_queue = 0;
1071 status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
1072 myri10ge_change_pause(mgp, mgp->pause);
1073 myri10ge_set_multicast_list(mgp->dev);
1077 #ifdef CONFIG_MYRI10GE_DCA
1078 static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
1083 cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1087 err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
1088 ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
1090 ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
1092 pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
1098 myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
1100 ss->cached_dca_tag = tag;
1101 put_be32(htonl(tag), ss->dca_tag);
1104 static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
1106 int cpu = get_cpu();
1109 if (cpu != ss->cpu) {
1110 tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
1111 if (ss->cached_dca_tag != tag)
1112 myri10ge_write_dca(ss, cpu, tag);
1118 static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
1121 struct pci_dev *pdev = mgp->pdev;
1123 if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
1125 if (!myri10ge_dca) {
1126 dev_err(&pdev->dev, "dca disabled by administrator\n");
1129 err = dca_add_requester(&pdev->dev);
1133 "dca_add_requester() failed, err=%d\n", err);
1136 mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
1137 mgp->dca_enabled = 1;
1138 for (i = 0; i < mgp->num_slices; i++) {
1139 mgp->ss[i].cpu = -1;
1140 mgp->ss[i].cached_dca_tag = -1;
1141 myri10ge_update_dca(&mgp->ss[i]);
1145 static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
1147 struct pci_dev *pdev = mgp->pdev;
1150 if (!mgp->dca_enabled)
1152 mgp->dca_enabled = 0;
1153 if (mgp->relaxed_order)
1154 myri10ge_toggle_relaxed(pdev, 1);
1155 err = dca_remove_requester(&pdev->dev);
1158 static int myri10ge_notify_dca_device(struct device *dev, void *data)
1160 struct myri10ge_priv *mgp;
1161 unsigned long event;
1163 mgp = dev_get_drvdata(dev);
1164 event = *(unsigned long *)data;
1166 if (event == DCA_PROVIDER_ADD)
1167 myri10ge_setup_dca(mgp);
1168 else if (event == DCA_PROVIDER_REMOVE)
1169 myri10ge_teardown_dca(mgp);
1172 #endif /* CONFIG_MYRI10GE_DCA */
1175 myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
1176 struct mcp_kreq_ether_recv *src)
1180 low = src->addr_low;
1181 src->addr_low = htonl(DMA_BIT_MASK(32));
1182 myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
1184 myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
1186 src->addr_low = low;
1187 put_be32(low, &dst->addr_low);
1191 static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
1193 struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
1195 if ((skb->protocol == htons(ETH_P_8021Q)) &&
1196 (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
1197 vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
1198 skb->csum = hw_csum;
1199 skb->ip_summed = CHECKSUM_COMPLETE;
1204 myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
1205 struct skb_frag_struct *rx_frags, int len, int hlen)
1207 struct skb_frag_struct *skb_frags;
1209 skb->len = skb->data_len = len;
1210 skb->truesize = len + sizeof(struct sk_buff);
1211 /* attach the page(s) */
1213 skb_frags = skb_shinfo(skb)->frags;
1215 memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
1216 len -= rx_frags->size;
1219 skb_shinfo(skb)->nr_frags++;
1222 /* pskb_may_pull is not available in irq context, but
1223 * skb_pull() (for ether_pad and eth_type_trans()) requires
1224 * the beginning of the packet in skb_headlen(), move it
1226 skb_copy_to_linear_data(skb, va, hlen);
1227 skb_shinfo(skb)->frags[0].page_offset += hlen;
1228 skb_shinfo(skb)->frags[0].size -= hlen;
1229 skb->data_len -= hlen;
1231 skb_pull(skb, MXGEFW_PAD);
1235 myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
1236 int bytes, int watchdog)
1240 #if MYRI10GE_ALLOC_SIZE > 4096
1244 if (unlikely(rx->watchdog_needed && !watchdog))
1247 /* try to refill entire ring */
1248 while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
1249 idx = rx->fill_cnt & rx->mask;
1250 if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
1251 /* we can use part of previous page */
1254 /* we need a new page */
1256 alloc_pages(GFP_ATOMIC | __GFP_COMP,
1257 MYRI10GE_ALLOC_ORDER);
1258 if (unlikely(page == NULL)) {
1259 if (rx->fill_cnt - rx->cnt < 16)
1260 rx->watchdog_needed = 1;
1264 rx->page_offset = 0;
1265 rx->bus = pci_map_page(mgp->pdev, page, 0,
1266 MYRI10GE_ALLOC_SIZE,
1267 PCI_DMA_FROMDEVICE);
1269 rx->info[idx].page = rx->page;
1270 rx->info[idx].page_offset = rx->page_offset;
1271 /* note that this is the address of the start of the
1273 dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
1274 rx->shadow[idx].addr_low =
1275 htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
1276 rx->shadow[idx].addr_high =
1277 htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));
1279 /* start next packet on a cacheline boundary */
1280 rx->page_offset += SKB_DATA_ALIGN(bytes);
1282 #if MYRI10GE_ALLOC_SIZE > 4096
1283 /* don't cross a 4KB boundary */
1284 end_offset = rx->page_offset + bytes - 1;
1285 if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
1286 rx->page_offset = end_offset & ~4095;
1290 /* copy 8 descriptors to the firmware at a time */
1291 if ((idx & 7) == 7) {
1292 myri10ge_submit_8rx(&rx->lanai[idx - 7],
1293 &rx->shadow[idx - 7]);
1299 myri10ge_unmap_rx_page(struct pci_dev *pdev,
1300 struct myri10ge_rx_buffer_state *info, int bytes)
1302 /* unmap the recvd page if we're the only or last user of it */
1303 if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
1304 (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
1305 pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
1306 & ~(MYRI10GE_ALLOC_SIZE - 1)),
1307 MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
1311 #define MYRI10GE_HLEN 64 /* The number of bytes to copy from a
1312 * page into an skb */
1315 myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
1318 struct myri10ge_priv *mgp = ss->mgp;
1319 struct sk_buff *skb;
1320 struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
1321 struct myri10ge_rx_buf *rx;
1322 int i, idx, hlen, remainder, bytes;
1323 struct pci_dev *pdev = mgp->pdev;
1324 struct net_device *dev = mgp->dev;
1327 if (len <= mgp->small_bytes) {
1329 bytes = mgp->small_bytes;
1332 bytes = mgp->big_bytes;
1336 idx = rx->cnt & rx->mask;
1337 va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
1339 /* Fill skb_frag_struct(s) with data from our receive */
1340 for (i = 0, remainder = len; remainder > 0; i++) {
1341 myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
1342 rx_frags[i].page = rx->info[idx].page;
1343 rx_frags[i].page_offset = rx->info[idx].page_offset;
1344 if (remainder < MYRI10GE_ALLOC_SIZE)
1345 rx_frags[i].size = remainder;
1347 rx_frags[i].size = MYRI10GE_ALLOC_SIZE;
1349 idx = rx->cnt & rx->mask;
1350 remainder -= MYRI10GE_ALLOC_SIZE;
1354 rx_frags[0].page_offset += MXGEFW_PAD;
1355 rx_frags[0].size -= MXGEFW_PAD;
1357 lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
1358 /* opaque, will come back in get_frag_header */
1360 (void *)(__force unsigned long)csum, csum);
1365 hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;
1367 /* allocate an skb to attach the page(s) to. This is done
1368 * after trying LRO, so as to avoid skb allocation overheads */
1370 skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
1371 if (unlikely(skb == NULL)) {
1372 ss->stats.rx_dropped++;
1375 put_page(rx_frags[i].page);
1380 /* Attach the pages to the skb, and trim off any padding */
1381 myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
1382 if (skb_shinfo(skb)->frags[0].size <= 0) {
1383 put_page(skb_shinfo(skb)->frags[0].page);
1384 skb_shinfo(skb)->nr_frags = 0;
1386 skb->protocol = eth_type_trans(skb, dev);
1387 skb_record_rx_queue(skb, ss - &mgp->ss[0]);
1389 if (dev->features & NETIF_F_RXCSUM) {
1390 if ((skb->protocol == htons(ETH_P_IP)) ||
1391 (skb->protocol == htons(ETH_P_IPV6))) {
1393 skb->ip_summed = CHECKSUM_COMPLETE;
1395 myri10ge_vlan_ip_csum(skb, csum);
1397 netif_receive_skb(skb);
1402 myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
1404 struct pci_dev *pdev = ss->mgp->pdev;
1405 struct myri10ge_tx_buf *tx = &ss->tx;
1406 struct netdev_queue *dev_queue;
1407 struct sk_buff *skb;
1410 while (tx->pkt_done != mcp_index) {
1411 idx = tx->done & tx->mask;
1412 skb = tx->info[idx].skb;
1415 tx->info[idx].skb = NULL;
1416 if (tx->info[idx].last) {
1418 tx->info[idx].last = 0;
1421 len = dma_unmap_len(&tx->info[idx], len);
1422 dma_unmap_len_set(&tx->info[idx], len, 0);
1424 ss->stats.tx_bytes += skb->len;
1425 ss->stats.tx_packets++;
1426 dev_kfree_skb_irq(skb);
1428 pci_unmap_single(pdev,
1429 dma_unmap_addr(&tx->info[idx],
1434 pci_unmap_page(pdev,
1435 dma_unmap_addr(&tx->info[idx],
1441 dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
1443 * Make a minimal effort to prevent the NIC from polling an
1444 * idle tx queue. If we can't get the lock we leave the queue
1445 * active. In this case, either a thread was about to start
1446 * using the queue anyway, or we lost a race and the NIC will
1447 * waste some of its resources polling an inactive queue for a
1451 if ((ss->mgp->dev->real_num_tx_queues > 1) &&
1452 __netif_tx_trylock(dev_queue)) {
1453 if (tx->req == tx->done) {
1454 tx->queue_active = 0;
1455 put_be32(htonl(1), tx->send_stop);
1459 __netif_tx_unlock(dev_queue);
1462 /* start the queue if we've stopped it */
1463 if (netif_tx_queue_stopped(dev_queue) &&
1464 tx->req - tx->done < (tx->mask >> 1) &&
1465 ss->mgp->running == MYRI10GE_ETH_RUNNING) {
1467 netif_tx_wake_queue(dev_queue);
1472 myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
1474 struct myri10ge_rx_done *rx_done = &ss->rx_done;
1475 struct myri10ge_priv *mgp = ss->mgp;
1477 unsigned long rx_bytes = 0;
1478 unsigned long rx_packets = 0;
1479 unsigned long rx_ok;
1481 int idx = rx_done->idx;
1482 int cnt = rx_done->cnt;
1488 * Prevent compiler from generating more than one ->features memory
1489 * access to avoid theoretical race condition with functions that
1490 * change NETIF_F_LRO flag at runtime.
1492 bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
1494 while (rx_done->entry[idx].length != 0 && work_done < budget) {
1495 length = ntohs(rx_done->entry[idx].length);
1496 rx_done->entry[idx].length = 0;
1497 checksum = csum_unfold(rx_done->entry[idx].checksum);
1498 rx_ok = myri10ge_rx_done(ss, length, checksum, lro_enabled);
1499 rx_packets += rx_ok;
1500 rx_bytes += rx_ok * (unsigned long)length;
1502 idx = cnt & (mgp->max_intr_slots - 1);
1507 ss->stats.rx_packets += rx_packets;
1508 ss->stats.rx_bytes += rx_bytes;
1511 lro_flush_all(&rx_done->lro_mgr);
1513 /* restock receive rings if needed */
1514 if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
1515 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
1516 mgp->small_bytes + MXGEFW_PAD, 0);
1517 if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
1518 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
1523 static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
1525 struct mcp_irq_data *stats = mgp->ss[0].fw_stats;
1527 if (unlikely(stats->stats_updated)) {
1528 unsigned link_up = ntohl(stats->link_up);
1529 if (mgp->link_state != link_up) {
1530 mgp->link_state = link_up;
1532 if (mgp->link_state == MXGEFW_LINK_UP) {
1533 if (netif_msg_link(mgp))
1534 netdev_info(mgp->dev, "link up\n");
1535 netif_carrier_on(mgp->dev);
1536 mgp->link_changes++;
1538 if (netif_msg_link(mgp))
1539 netdev_info(mgp->dev, "link %s\n",
1540 link_up == MXGEFW_LINK_MYRINET ?
1541 "mismatch (Myrinet detected)" :
1543 netif_carrier_off(mgp->dev);
1544 mgp->link_changes++;
1547 if (mgp->rdma_tags_available !=
1548 ntohl(stats->rdma_tags_available)) {
1549 mgp->rdma_tags_available =
1550 ntohl(stats->rdma_tags_available);
1551 netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
1552 mgp->rdma_tags_available);
1554 mgp->down_cnt += stats->link_down;
1555 if (stats->link_down)
1556 wake_up(&mgp->down_wq);
1560 static int myri10ge_poll(struct napi_struct *napi, int budget)
1562 struct myri10ge_slice_state *ss =
1563 container_of(napi, struct myri10ge_slice_state, napi);
1566 #ifdef CONFIG_MYRI10GE_DCA
1567 if (ss->mgp->dca_enabled)
1568 myri10ge_update_dca(ss);
1571 /* process as many rx events as NAPI will allow */
1572 work_done = myri10ge_clean_rx_done(ss, budget);
1574 if (work_done < budget) {
1575 napi_complete(napi);
1576 put_be32(htonl(3), ss->irq_claim);
1581 static irqreturn_t myri10ge_intr(int irq, void *arg)
1583 struct myri10ge_slice_state *ss = arg;
1584 struct myri10ge_priv *mgp = ss->mgp;
1585 struct mcp_irq_data *stats = ss->fw_stats;
1586 struct myri10ge_tx_buf *tx = &ss->tx;
1587 u32 send_done_count;
1590 /* an interrupt on a non-zero receive-only slice is implicitly
1591 * valid since MSI-X irqs are not shared */
1592 if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
1593 napi_schedule(&ss->napi);
1597 /* make sure it is our IRQ, and that the DMA has finished */
1598 if (unlikely(!stats->valid))
1601 /* low bit indicates receives are present, so schedule
1602 * napi poll handler */
1603 if (stats->valid & 1)
1604 napi_schedule(&ss->napi);
1606 if (!mgp->msi_enabled && !mgp->msix_enabled) {
1607 put_be32(0, mgp->irq_deassert);
1608 if (!myri10ge_deassert_wait)
1614 /* Wait for IRQ line to go low, if using INTx */
1618 /* check for transmit completes and receives */
1619 send_done_count = ntohl(stats->send_done_count);
1620 if (send_done_count != tx->pkt_done)
1621 myri10ge_tx_done(ss, (int)send_done_count);
1622 if (unlikely(i > myri10ge_max_irq_loops)) {
1623 netdev_err(mgp->dev, "irq stuck?\n");
1625 schedule_work(&mgp->watchdog_work);
1627 if (likely(stats->valid == 0))
1633 /* Only slice 0 updates stats */
1635 myri10ge_check_statblock(mgp);
1637 put_be32(htonl(3), ss->irq_claim + 1);
1642 myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
1644 struct myri10ge_priv *mgp = netdev_priv(netdev);
1648 cmd->autoneg = AUTONEG_DISABLE;
1649 ethtool_cmd_speed_set(cmd, SPEED_10000);
1650 cmd->duplex = DUPLEX_FULL;
1653 * parse the product code to deterimine the interface type
1654 * (CX4, XFP, Quad Ribbon Fiber) by looking at the character
1655 * after the 3rd dash in the driver's cached copy of the
1656 * EEPROM's product code string.
1658 ptr = mgp->product_code_string;
1660 netdev_err(netdev, "Missing product code\n");
1663 for (i = 0; i < 3; i++, ptr++) {
1664 ptr = strchr(ptr, '-');
1666 netdev_err(netdev, "Invalid product code %s\n",
1667 mgp->product_code_string);
1673 if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
1674 /* We've found either an XFP, quad ribbon fiber, or SFP+ */
1675 cmd->port = PORT_FIBRE;
1676 cmd->supported |= SUPPORTED_FIBRE;
1677 cmd->advertising |= ADVERTISED_FIBRE;
1679 cmd->port = PORT_OTHER;
1681 if (*ptr == 'R' || *ptr == 'S')
1682 cmd->transceiver = XCVR_EXTERNAL;
1684 cmd->transceiver = XCVR_INTERNAL;
1690 myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
1692 struct myri10ge_priv *mgp = netdev_priv(netdev);
1694 strlcpy(info->driver, "myri10ge", sizeof(info->driver));
1695 strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
1696 strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
1697 strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
1701 myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
1703 struct myri10ge_priv *mgp = netdev_priv(netdev);
1705 coal->rx_coalesce_usecs = mgp->intr_coal_delay;
1710 myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
1712 struct myri10ge_priv *mgp = netdev_priv(netdev);
1714 mgp->intr_coal_delay = coal->rx_coalesce_usecs;
1715 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
1720 myri10ge_get_pauseparam(struct net_device *netdev,
1721 struct ethtool_pauseparam *pause)
1723 struct myri10ge_priv *mgp = netdev_priv(netdev);
1726 pause->rx_pause = mgp->pause;
1727 pause->tx_pause = mgp->pause;
1731 myri10ge_set_pauseparam(struct net_device *netdev,
1732 struct ethtool_pauseparam *pause)
1734 struct myri10ge_priv *mgp = netdev_priv(netdev);
1736 if (pause->tx_pause != mgp->pause)
1737 return myri10ge_change_pause(mgp, pause->tx_pause);
1738 if (pause->rx_pause != mgp->pause)
1739 return myri10ge_change_pause(mgp, pause->rx_pause);
1740 if (pause->autoneg != 0)
1746 myri10ge_get_ringparam(struct net_device *netdev,
1747 struct ethtool_ringparam *ring)
1749 struct myri10ge_priv *mgp = netdev_priv(netdev);
1751 ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
1752 ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
1753 ring->rx_jumbo_max_pending = 0;
1754 ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
1755 ring->rx_mini_pending = ring->rx_mini_max_pending;
1756 ring->rx_pending = ring->rx_max_pending;
1757 ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
1758 ring->tx_pending = ring->tx_max_pending;
1761 static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
1762 "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
1763 "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
1764 "rx_length_errors", "rx_over_errors", "rx_crc_errors",
1765 "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
1766 "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
1767 "tx_heartbeat_errors", "tx_window_errors",
1768 /* device-specific stats */
1769 "tx_boundary", "WC", "irq", "MSI", "MSIX",
1770 "read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
1771 "serial_number", "watchdog_resets",
1772 #ifdef CONFIG_MYRI10GE_DCA
1773 "dca_capable_firmware", "dca_device_present",
1775 "link_changes", "link_up", "dropped_link_overflow",
1776 "dropped_link_error_or_filtered",
1777 "dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
1778 "dropped_unicast_filtered", "dropped_multicast_filtered",
1779 "dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
1780 "dropped_no_big_buffer"
1783 static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
1784 "----------- slice ---------",
1785 "tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
1786 "rx_small_cnt", "rx_big_cnt",
1787 "wake_queue", "stop_queue", "tx_linearized", "LRO aggregated",
1789 "LRO avg aggr", "LRO no_desc"
1792 #define MYRI10GE_NET_STATS_LEN 21
1793 #define MYRI10GE_MAIN_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_main_stats)
1794 #define MYRI10GE_SLICE_STATS_LEN ARRAY_SIZE(myri10ge_gstrings_slice_stats)
1797 myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
1799 struct myri10ge_priv *mgp = netdev_priv(netdev);
1802 switch (stringset) {
1804 memcpy(data, *myri10ge_gstrings_main_stats,
1805 sizeof(myri10ge_gstrings_main_stats));
1806 data += sizeof(myri10ge_gstrings_main_stats);
1807 for (i = 0; i < mgp->num_slices; i++) {
1808 memcpy(data, *myri10ge_gstrings_slice_stats,
1809 sizeof(myri10ge_gstrings_slice_stats));
1810 data += sizeof(myri10ge_gstrings_slice_stats);
1816 static int myri10ge_get_sset_count(struct net_device *netdev, int sset)
1818 struct myri10ge_priv *mgp = netdev_priv(netdev);
1822 return MYRI10GE_MAIN_STATS_LEN +
1823 mgp->num_slices * MYRI10GE_SLICE_STATS_LEN;
1830 myri10ge_get_ethtool_stats(struct net_device *netdev,
1831 struct ethtool_stats *stats, u64 * data)
1833 struct myri10ge_priv *mgp = netdev_priv(netdev);
1834 struct myri10ge_slice_state *ss;
1835 struct rtnl_link_stats64 link_stats;
1839 /* force stats update */
1840 memset(&link_stats, 0, sizeof(link_stats));
1841 (void)myri10ge_get_stats(netdev, &link_stats);
1842 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
1843 data[i] = ((u64 *)&link_stats)[i];
1845 data[i++] = (unsigned int)mgp->tx_boundary;
1846 data[i++] = (unsigned int)mgp->wc_enabled;
1847 data[i++] = (unsigned int)mgp->pdev->irq;
1848 data[i++] = (unsigned int)mgp->msi_enabled;
1849 data[i++] = (unsigned int)mgp->msix_enabled;
1850 data[i++] = (unsigned int)mgp->read_dma;
1851 data[i++] = (unsigned int)mgp->write_dma;
1852 data[i++] = (unsigned int)mgp->read_write_dma;
1853 data[i++] = (unsigned int)mgp->serial_number;
1854 data[i++] = (unsigned int)mgp->watchdog_resets;
1855 #ifdef CONFIG_MYRI10GE_DCA
1856 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
1857 data[i++] = (unsigned int)(mgp->dca_enabled);
1859 data[i++] = (unsigned int)mgp->link_changes;
1861 /* firmware stats are useful only in the first slice */
1863 data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
1864 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
1866 (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
1867 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
1868 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
1869 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
1870 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
1872 (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
1873 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
1874 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
1875 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
1876 data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);
1878 for (slice = 0; slice < mgp->num_slices; slice++) {
1879 ss = &mgp->ss[slice];
1881 data[i++] = (unsigned int)ss->tx.pkt_start;
1882 data[i++] = (unsigned int)ss->tx.pkt_done;
1883 data[i++] = (unsigned int)ss->tx.req;
1884 data[i++] = (unsigned int)ss->tx.done;
1885 data[i++] = (unsigned int)ss->rx_small.cnt;
1886 data[i++] = (unsigned int)ss->rx_big.cnt;
1887 data[i++] = (unsigned int)ss->tx.wake_queue;
1888 data[i++] = (unsigned int)ss->tx.stop_queue;
1889 data[i++] = (unsigned int)ss->tx.linearized;
1890 data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
1891 data[i++] = ss->rx_done.lro_mgr.stats.flushed;
1892 if (ss->rx_done.lro_mgr.stats.flushed)
1893 data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
1894 ss->rx_done.lro_mgr.stats.flushed;
1897 data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
1901 static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
1903 struct myri10ge_priv *mgp = netdev_priv(netdev);
1904 mgp->msg_enable = value;
1907 static u32 myri10ge_get_msglevel(struct net_device *netdev)
1909 struct myri10ge_priv *mgp = netdev_priv(netdev);
1910 return mgp->msg_enable;
1913 static const struct ethtool_ops myri10ge_ethtool_ops = {
1914 .get_settings = myri10ge_get_settings,
1915 .get_drvinfo = myri10ge_get_drvinfo,
1916 .get_coalesce = myri10ge_get_coalesce,
1917 .set_coalesce = myri10ge_set_coalesce,
1918 .get_pauseparam = myri10ge_get_pauseparam,
1919 .set_pauseparam = myri10ge_set_pauseparam,
1920 .get_ringparam = myri10ge_get_ringparam,
1921 .get_link = ethtool_op_get_link,
1922 .get_strings = myri10ge_get_strings,
1923 .get_sset_count = myri10ge_get_sset_count,
1924 .get_ethtool_stats = myri10ge_get_ethtool_stats,
1925 .set_msglevel = myri10ge_set_msglevel,
1926 .get_msglevel = myri10ge_get_msglevel,
1929 static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
1931 struct myri10ge_priv *mgp = ss->mgp;
1932 struct myri10ge_cmd cmd;
1933 struct net_device *dev = mgp->dev;
1934 int tx_ring_size, rx_ring_size;
1935 int tx_ring_entries, rx_ring_entries;
1936 int i, slice, status;
1939 /* get ring sizes */
1940 slice = ss - mgp->ss;
1942 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
1943 tx_ring_size = cmd.data0;
1945 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
1948 rx_ring_size = cmd.data0;
1950 tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
1951 rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
1952 ss->tx.mask = tx_ring_entries - 1;
1953 ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;
1957 /* allocate the host shadow rings */
1959 bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
1960 * sizeof(*ss->tx.req_list);
1961 ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
1962 if (ss->tx.req_bytes == NULL)
1963 goto abort_with_nothing;
1965 /* ensure req_list entries are aligned to 8 bytes */
1966 ss->tx.req_list = (struct mcp_kreq_ether_send *)
1967 ALIGN((unsigned long)ss->tx.req_bytes, 8);
1968 ss->tx.queue_active = 0;
1970 bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
1971 ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
1972 if (ss->rx_small.shadow == NULL)
1973 goto abort_with_tx_req_bytes;
1975 bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
1976 ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
1977 if (ss->rx_big.shadow == NULL)
1978 goto abort_with_rx_small_shadow;
1980 /* allocate the host info rings */
1982 bytes = tx_ring_entries * sizeof(*ss->tx.info);
1983 ss->tx.info = kzalloc(bytes, GFP_KERNEL);
1984 if (ss->tx.info == NULL)
1985 goto abort_with_rx_big_shadow;
1987 bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
1988 ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
1989 if (ss->rx_small.info == NULL)
1990 goto abort_with_tx_info;
1992 bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
1993 ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
1994 if (ss->rx_big.info == NULL)
1995 goto abort_with_rx_small_info;
1997 /* Fill the receive rings */
1999 ss->rx_small.cnt = 0;
2000 ss->rx_big.fill_cnt = 0;
2001 ss->rx_small.fill_cnt = 0;
2002 ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
2003 ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
2004 ss->rx_small.watchdog_needed = 0;
2005 ss->rx_big.watchdog_needed = 0;
2006 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
2007 mgp->small_bytes + MXGEFW_PAD, 0);
2009 if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
2010 netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
2011 slice, ss->rx_small.fill_cnt);
2012 goto abort_with_rx_small_ring;
2015 myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
2016 if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
2017 netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
2018 slice, ss->rx_big.fill_cnt);
2019 goto abort_with_rx_big_ring;
2024 abort_with_rx_big_ring:
2025 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
2026 int idx = i & ss->rx_big.mask;
2027 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
2029 put_page(ss->rx_big.info[idx].page);
2032 abort_with_rx_small_ring:
2033 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
2034 int idx = i & ss->rx_small.mask;
2035 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
2036 mgp->small_bytes + MXGEFW_PAD);
2037 put_page(ss->rx_small.info[idx].page);
2040 kfree(ss->rx_big.info);
2042 abort_with_rx_small_info:
2043 kfree(ss->rx_small.info);
2048 abort_with_rx_big_shadow:
2049 kfree(ss->rx_big.shadow);
2051 abort_with_rx_small_shadow:
2052 kfree(ss->rx_small.shadow);
2054 abort_with_tx_req_bytes:
2055 kfree(ss->tx.req_bytes);
2056 ss->tx.req_bytes = NULL;
2057 ss->tx.req_list = NULL;
2063 static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
2065 struct myri10ge_priv *mgp = ss->mgp;
2066 struct sk_buff *skb;
2067 struct myri10ge_tx_buf *tx;
2070 /* If not allocated, skip it */
2071 if (ss->tx.req_list == NULL)
2074 for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
2075 idx = i & ss->rx_big.mask;
2076 if (i == ss->rx_big.fill_cnt - 1)
2077 ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
2078 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
2080 put_page(ss->rx_big.info[idx].page);
2083 for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
2084 idx = i & ss->rx_small.mask;
2085 if (i == ss->rx_small.fill_cnt - 1)
2086 ss->rx_small.info[idx].page_offset =
2087 MYRI10GE_ALLOC_SIZE;
2088 myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
2089 mgp->small_bytes + MXGEFW_PAD);
2090 put_page(ss->rx_small.info[idx].page);
2093 while (tx->done != tx->req) {
2094 idx = tx->done & tx->mask;
2095 skb = tx->info[idx].skb;
2098 tx->info[idx].skb = NULL;
2100 len = dma_unmap_len(&tx->info[idx], len);
2101 dma_unmap_len_set(&tx->info[idx], len, 0);
2103 ss->stats.tx_dropped++;
2104 dev_kfree_skb_any(skb);
2106 pci_unmap_single(mgp->pdev,
2107 dma_unmap_addr(&tx->info[idx],
2112 pci_unmap_page(mgp->pdev,
2113 dma_unmap_addr(&tx->info[idx],
2118 kfree(ss->rx_big.info);
2120 kfree(ss->rx_small.info);
2124 kfree(ss->rx_big.shadow);
2126 kfree(ss->rx_small.shadow);
2128 kfree(ss->tx.req_bytes);
2129 ss->tx.req_bytes = NULL;
2130 ss->tx.req_list = NULL;
2133 static int myri10ge_request_irq(struct myri10ge_priv *mgp)
2135 struct pci_dev *pdev = mgp->pdev;
2136 struct myri10ge_slice_state *ss;
2137 struct net_device *netdev = mgp->dev;
2141 mgp->msi_enabled = 0;
2142 mgp->msix_enabled = 0;
2145 if (mgp->num_slices > 1) {
2147 pci_enable_msix(pdev, mgp->msix_vectors,
2150 mgp->msix_enabled = 1;
2153 "Error %d setting up MSI-X\n", status);
2157 if (mgp->msix_enabled == 0) {
2158 status = pci_enable_msi(pdev);
2161 "Error %d setting up MSI; falling back to xPIC\n",
2164 mgp->msi_enabled = 1;
2168 if (mgp->msix_enabled) {
2169 for (i = 0; i < mgp->num_slices; i++) {
2171 snprintf(ss->irq_desc, sizeof(ss->irq_desc),
2172 "%s:slice-%d", netdev->name, i);
2173 status = request_irq(mgp->msix_vectors[i].vector,
2174 myri10ge_intr, 0, ss->irq_desc,
2178 "slice %d failed to allocate IRQ\n", i);
2181 free_irq(mgp->msix_vectors[i].vector,
2185 pci_disable_msix(pdev);
2190 status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
2191 mgp->dev->name, &mgp->ss[0]);
2193 dev_err(&pdev->dev, "failed to allocate IRQ\n");
2194 if (mgp->msi_enabled)
2195 pci_disable_msi(pdev);
2201 static void myri10ge_free_irq(struct myri10ge_priv *mgp)
2203 struct pci_dev *pdev = mgp->pdev;
2206 if (mgp->msix_enabled) {
2207 for (i = 0; i < mgp->num_slices; i++)
2208 free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
2210 free_irq(pdev->irq, &mgp->ss[0]);
2212 if (mgp->msi_enabled)
2213 pci_disable_msi(pdev);
2214 if (mgp->msix_enabled)
2215 pci_disable_msix(pdev);
2219 myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
2220 void **ip_hdr, void **tcpudp_hdr,
2221 u64 * hdr_flags, void *priv)
2224 struct vlan_ethhdr *veh;
2226 u8 *va = page_address(frag->page) + frag->page_offset;
2227 unsigned long ll_hlen;
2228 /* passed opaque through lro_receive_frags() */
2229 __wsum csum = (__force __wsum) (unsigned long)priv;
2231 /* find the mac header, aborting if not IPv4 */
2233 eh = (struct ethhdr *)va;
2236 if (eh->h_proto != htons(ETH_P_IP)) {
2237 if (eh->h_proto == htons(ETH_P_8021Q)) {
2238 veh = (struct vlan_ethhdr *)va;
2239 if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
2242 ll_hlen += VLAN_HLEN;
2245 * HW checksum starts ETH_HLEN bytes into
2246 * frame, so we must subtract off the VLAN
2247 * header's checksum before csum can be used
2249 csum = csum_sub(csum, csum_partial(va + ETH_HLEN,
2255 *hdr_flags = LRO_IPV4;
2257 iph = (struct iphdr *)(va + ll_hlen);
2259 if (iph->protocol != IPPROTO_TCP)
2261 if (ip_is_fragment(iph))
2263 *hdr_flags |= LRO_TCP;
2264 *tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);
2266 /* verify the IP checksum */
2267 if (unlikely(ip_fast_csum((u8 *) iph, iph->ihl)))
2270 /* verify the checksum */
2271 if (unlikely(csum_tcpudp_magic(iph->saddr, iph->daddr,
2272 ntohs(iph->tot_len) - (iph->ihl << 2),
2273 IPPROTO_TCP, csum)))
2279 static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
2281 struct myri10ge_cmd cmd;
2282 struct myri10ge_slice_state *ss;
2285 ss = &mgp->ss[slice];
2287 if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
2289 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
2291 ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
2292 (mgp->sram + cmd.data0);
2295 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
2297 ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
2298 (mgp->sram + cmd.data0);
2301 status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
2302 ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
2303 (mgp->sram + cmd.data0);
2305 ss->tx.send_go = (__iomem __be32 *)
2306 (mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
2307 ss->tx.send_stop = (__iomem __be32 *)
2308 (mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);
2313 static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
2315 struct myri10ge_cmd cmd;
2316 struct myri10ge_slice_state *ss;
2319 ss = &mgp->ss[slice];
2320 cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
2321 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
2322 cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16);
2323 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
2324 if (status == -ENOSYS) {
2325 dma_addr_t bus = ss->fw_stats_bus;
2328 bus += offsetof(struct mcp_irq_data, send_done_count);
2329 cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
2330 cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
2331 status = myri10ge_send_cmd(mgp,
2332 MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
2334 /* Firmware cannot support multicast without STATS_DMA_V2 */
2335 mgp->fw_multicast_support = 0;
2337 mgp->fw_multicast_support = 1;
2342 static int myri10ge_open(struct net_device *dev)
2344 struct myri10ge_slice_state *ss;
2345 struct myri10ge_priv *mgp = netdev_priv(dev);
2346 struct myri10ge_cmd cmd;
2347 int i, status, big_pow2, slice;
2349 struct net_lro_mgr *lro_mgr;
2351 if (mgp->running != MYRI10GE_ETH_STOPPED)
2354 mgp->running = MYRI10GE_ETH_STARTING;
2355 status = myri10ge_reset(mgp);
2357 netdev_err(dev, "failed reset\n");
2358 goto abort_with_nothing;
2361 if (mgp->num_slices > 1) {
2362 cmd.data0 = mgp->num_slices;
2363 cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
2364 if (mgp->dev->real_num_tx_queues > 1)
2365 cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
2366 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
2369 netdev_err(dev, "failed to set number of slices\n");
2370 goto abort_with_nothing;
2372 /* setup the indirection table */
2373 cmd.data0 = mgp->num_slices;
2374 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
2377 status |= myri10ge_send_cmd(mgp,
2378 MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
2381 netdev_err(dev, "failed to setup rss tables\n");
2382 goto abort_with_nothing;
2385 /* just enable an identity mapping */
2386 itable = mgp->sram + cmd.data0;
2387 for (i = 0; i < mgp->num_slices; i++)
2388 __raw_writeb(i, &itable[i]);
2391 cmd.data1 = myri10ge_rss_hash;
2392 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
2395 netdev_err(dev, "failed to enable slices\n");
2396 goto abort_with_nothing;
2400 status = myri10ge_request_irq(mgp);
2402 goto abort_with_nothing;
2404 /* decide what small buffer size to use. For good TCP rx
2405 * performance, it is important to not receive 1514 byte
2406 * frames into jumbo buffers, as it confuses the socket buffer
2407 * accounting code, leading to drops and erratic performance.
2410 if (dev->mtu <= ETH_DATA_LEN)
2411 /* enough for a TCP header */
2412 mgp->small_bytes = (128 > SMP_CACHE_BYTES)
2413 ? (128 - MXGEFW_PAD)
2414 : (SMP_CACHE_BYTES - MXGEFW_PAD);
2416 /* enough for a vlan encapsulated ETH_DATA_LEN frame */
2417 mgp->small_bytes = VLAN_ETH_FRAME_LEN;
2419 /* Override the small buffer size? */
2420 if (myri10ge_small_bytes > 0)
2421 mgp->small_bytes = myri10ge_small_bytes;
2423 /* Firmware needs the big buff size as a power of 2. Lie and
2424 * tell him the buffer is larger, because we only use 1
2425 * buffer/pkt, and the mtu will prevent overruns.
2427 big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
2428 if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
2429 while (!is_power_of_2(big_pow2))
2431 mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
2433 big_pow2 = MYRI10GE_ALLOC_SIZE;
2434 mgp->big_bytes = big_pow2;
2437 /* setup the per-slice data structures */
2438 for (slice = 0; slice < mgp->num_slices; slice++) {
2439 ss = &mgp->ss[slice];
2441 status = myri10ge_get_txrx(mgp, slice);
2443 netdev_err(dev, "failed to get ring sizes or locations\n");
2444 goto abort_with_rings;
2446 status = myri10ge_allocate_rings(ss);
2448 goto abort_with_rings;
2450 /* only firmware which supports multiple TX queues
2451 * supports setting up the tx stats on non-zero
2453 if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
2454 status = myri10ge_set_stats(mgp, slice);
2456 netdev_err(dev, "Couldn't set stats DMA\n");
2457 goto abort_with_rings;
2460 lro_mgr = &ss->rx_done.lro_mgr;
2462 lro_mgr->features = LRO_F_NAPI;
2463 lro_mgr->ip_summed = CHECKSUM_COMPLETE;
2464 lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
2465 lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
2466 lro_mgr->lro_arr = ss->rx_done.lro_desc;
2467 lro_mgr->get_frag_header = myri10ge_get_frag_header;
2468 lro_mgr->max_aggr = myri10ge_lro_max_pkts;
2469 lro_mgr->frag_align_pad = 2;
2470 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
2471 lro_mgr->max_aggr = MAX_SKB_FRAGS;
2473 /* must happen prior to any irq */
2474 napi_enable(&(ss)->napi);
2477 /* now give firmware buffers sizes, and MTU */
2478 cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
2479 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
2480 cmd.data0 = mgp->small_bytes;
2482 myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
2483 cmd.data0 = big_pow2;
2485 myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
2487 netdev_err(dev, "Couldn't set buffer sizes\n");
2488 goto abort_with_rings;
2492 * Set Linux style TSO mode; this is needed only on newer
2493 * firmware versions. Older versions default to Linux
2497 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
2498 if (status && status != -ENOSYS) {
2499 netdev_err(dev, "Couldn't set TSO mode\n");
2500 goto abort_with_rings;
2503 mgp->link_state = ~0U;
2504 mgp->rdma_tags_available = 15;
2506 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
2508 netdev_err(dev, "Couldn't bring up link\n");
2509 goto abort_with_rings;
2512 mgp->running = MYRI10GE_ETH_RUNNING;
2513 mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
2514 add_timer(&mgp->watchdog_timer);
2515 netif_tx_wake_all_queues(dev);
2522 napi_disable(&mgp->ss[slice].napi);
2524 for (i = 0; i < mgp->num_slices; i++)
2525 myri10ge_free_rings(&mgp->ss[i]);
2527 myri10ge_free_irq(mgp);
2530 mgp->running = MYRI10GE_ETH_STOPPED;
2534 static int myri10ge_close(struct net_device *dev)
2536 struct myri10ge_priv *mgp = netdev_priv(dev);
2537 struct myri10ge_cmd cmd;
2538 int status, old_down_cnt;
2541 if (mgp->running != MYRI10GE_ETH_RUNNING)
2544 if (mgp->ss[0].tx.req_bytes == NULL)
2547 del_timer_sync(&mgp->watchdog_timer);
2548 mgp->running = MYRI10GE_ETH_STOPPING;
2549 for (i = 0; i < mgp->num_slices; i++) {
2550 napi_disable(&mgp->ss[i].napi);
2552 netif_carrier_off(dev);
2554 netif_tx_stop_all_queues(dev);
2555 if (mgp->rebooted == 0) {
2556 old_down_cnt = mgp->down_cnt;
2559 myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
2561 netdev_err(dev, "Couldn't bring down link\n");
2563 wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
2565 if (old_down_cnt == mgp->down_cnt)
2566 netdev_err(dev, "never got down irq\n");
2568 netif_tx_disable(dev);
2569 myri10ge_free_irq(mgp);
2570 for (i = 0; i < mgp->num_slices; i++)
2571 myri10ge_free_rings(&mgp->ss[i]);
2573 mgp->running = MYRI10GE_ETH_STOPPED;
2577 /* copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
2578 * backwards one at a time and handle ring wraps */
2581 myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
2582 struct mcp_kreq_ether_send *src, int cnt)
2584 int idx, starting_slot;
2585 starting_slot = tx->req;
2588 idx = (starting_slot + cnt) & tx->mask;
2589 myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src));
2595 * copy an array of struct mcp_kreq_ether_send's to the mcp. Copy
2596 * at most 32 bytes at a time, so as to avoid involving the software
2597 * pio handler in the nic. We re-write the first segment's flags
2598 * to mark them valid only after writing the entire chain.
2602 myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
2606 struct mcp_kreq_ether_send __iomem *dstp, *dst;
2607 struct mcp_kreq_ether_send *srcp;
2610 idx = tx->req & tx->mask;
2612 last_flags = src->flags;
2615 dst = dstp = &tx->lanai[idx];
2618 if ((idx + cnt) < tx->mask) {
2619 for (i = 0; i < (cnt - 1); i += 2) {
2620 myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
2621 mb(); /* force write every 32 bytes */
2626 /* submit all but the first request, and ensure
2627 * that it is submitted below */
2628 myri10ge_submit_req_backwards(tx, src, cnt);
2632 /* submit the first request */
2633 myri10ge_pio_copy(dstp, srcp, sizeof(*src));
2634 mb(); /* barrier before setting valid flag */
2637 /* re-write the last 32-bits with the valid flags */
2638 src->flags = last_flags;
2639 put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3);
2645 * Transmit a packet. We need to split the packet so that a single
2646 * segment does not cross myri10ge->tx_boundary, so this makes segment
2647 * counting tricky. So rather than try to count segments up front, we
2648 * just give up if there are too few segments to hold a reasonably
2649 * fragmented packet currently available. If we run
2650 * out of segments while preparing a packet for DMA, we just linearize
2654 static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
2655 struct net_device *dev)
2657 struct myri10ge_priv *mgp = netdev_priv(dev);
2658 struct myri10ge_slice_state *ss;
2659 struct mcp_kreq_ether_send *req;
2660 struct myri10ge_tx_buf *tx;
2661 struct skb_frag_struct *frag;
2662 struct netdev_queue *netdev_queue;
2665 __be32 high_swapped;
2667 int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
2668 u16 pseudo_hdr_offset, cksum_offset, queue;
2669 int cum_len, seglen, boundary, rdma_count;
2672 queue = skb_get_queue_mapping(skb);
2673 ss = &mgp->ss[queue];
2674 netdev_queue = netdev_get_tx_queue(mgp->dev, queue);
2679 avail = tx->mask - 1 - (tx->req - tx->done);
2682 max_segments = MXGEFW_MAX_SEND_DESC;
2684 if (skb_is_gso(skb)) {
2685 mss = skb_shinfo(skb)->gso_size;
2686 max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
2689 if ((unlikely(avail < max_segments))) {
2690 /* we are out of transmit resources */
2692 netif_tx_stop_queue(netdev_queue);
2693 return NETDEV_TX_BUSY;
2696 /* Setup checksum offloading, if needed */
2698 pseudo_hdr_offset = 0;
2700 flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
2701 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2702 cksum_offset = skb_checksum_start_offset(skb);
2703 pseudo_hdr_offset = cksum_offset + skb->csum_offset;
2704 /* If the headers are excessively large, then we must
2705 * fall back to a software checksum */
2706 if (unlikely(!mss && (cksum_offset > 255 ||
2707 pseudo_hdr_offset > 127))) {
2708 if (skb_checksum_help(skb))
2711 pseudo_hdr_offset = 0;
2713 odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
2714 flags |= MXGEFW_FLAGS_CKSUM;
2720 if (mss) { /* TSO */
2721 /* this removes any CKSUM flag from before */
2722 flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
2724 /* negative cum_len signifies to the
2725 * send loop that we are still in the
2726 * header portion of the TSO packet.
2727 * TSO header can be at most 1KB long */
2728 cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));
2730 /* for IPv6 TSO, the checksum offset stores the
2731 * TCP header length, to save the firmware from
2732 * the need to parse the headers */
2733 if (skb_is_gso_v6(skb)) {
2734 cksum_offset = tcp_hdrlen(skb);
2735 /* Can only handle headers <= max_tso6 long */
2736 if (unlikely(-cum_len > mgp->max_tso6))
2737 return myri10ge_sw_tso(skb, dev);
2739 /* for TSO, pseudo_hdr_offset holds mss.
2740 * The firmware figures out where to put
2741 * the checksum by parsing the header. */
2742 pseudo_hdr_offset = mss;
2744 /* Mark small packets, and pad out tiny packets */
2745 if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
2746 flags |= MXGEFW_FLAGS_SMALL;
2748 /* pad frames to at least ETH_ZLEN bytes */
2749 if (unlikely(skb->len < ETH_ZLEN)) {
2750 if (skb_padto(skb, ETH_ZLEN)) {
2751 /* The packet is gone, so we must
2753 ss->stats.tx_dropped += 1;
2754 return NETDEV_TX_OK;
2756 /* adjust the len to account for the zero pad
2757 * so that the nic can know how long it is */
2758 skb->len = ETH_ZLEN;
2762 /* map the skb for DMA */
2763 len = skb_headlen(skb);
2764 idx = tx->req & tx->mask;
2765 tx->info[idx].skb = skb;
2766 bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
2767 dma_unmap_addr_set(&tx->info[idx], bus, bus);
2768 dma_unmap_len_set(&tx->info[idx], len, len);
2770 frag_cnt = skb_shinfo(skb)->nr_frags;
2775 /* "rdma_count" is the number of RDMAs belonging to the
2776 * current packet BEFORE the current send request. For
2777 * non-TSO packets, this is equal to "count".
2778 * For TSO packets, rdma_count needs to be reset
2779 * to 0 after a segment cut.
2781 * The rdma_count field of the send request is
2782 * the number of RDMAs of the packet starting at
2783 * that request. For TSO send requests with one ore more cuts
2784 * in the middle, this is the number of RDMAs starting
2785 * after the last cut in the request. All previous
2786 * segments before the last cut implicitly have 1 RDMA.
2788 * Since the number of RDMAs is not known beforehand,
2789 * it must be filled-in retroactively - after each
2790 * segmentation cut or at the end of the entire packet.
2794 /* Break the SKB or Fragment up into pieces which
2795 * do not cross mgp->tx_boundary */
2796 low = MYRI10GE_LOWPART_TO_U32(bus);
2797 high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
2802 if (unlikely(count == max_segments))
2803 goto abort_linearize;
2806 (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
2807 seglen = boundary - low;
2810 flags_next = flags & ~MXGEFW_FLAGS_FIRST;
2811 cum_len_next = cum_len + seglen;
2812 if (mss) { /* TSO */
2813 (req - rdma_count)->rdma_count = rdma_count + 1;
2815 if (likely(cum_len >= 0)) { /* payload */
2816 int next_is_first, chop;
2818 chop = (cum_len_next > mss);
2819 cum_len_next = cum_len_next % mss;
2820 next_is_first = (cum_len_next == 0);
2821 flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
2822 flags_next |= next_is_first *
2824 rdma_count |= -(chop | next_is_first);
2825 rdma_count += chop & !next_is_first;
2826 } else if (likely(cum_len_next >= 0)) { /* header ends */
2832 small = (mss <= MXGEFW_SEND_SMALL_SIZE);
2833 flags_next = MXGEFW_FLAGS_TSO_PLD |
2834 MXGEFW_FLAGS_FIRST |
2835 (small * MXGEFW_FLAGS_SMALL);
2838 req->addr_high = high_swapped;
2839 req->addr_low = htonl(low);
2840 req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
2841 req->pad = 0; /* complete solid 16-byte block; does this matter? */
2842 req->rdma_count = 1;
2843 req->length = htons(seglen);
2844 req->cksum_offset = cksum_offset;
2845 req->flags = flags | ((cum_len & 1) * odd_flag);
2849 cum_len = cum_len_next;
2854 if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) {
2855 if (unlikely(cksum_offset > seglen))
2856 cksum_offset -= seglen;
2861 if (frag_idx == frag_cnt)
2864 /* map next fragment for DMA */
2865 idx = (count + tx->req) & tx->mask;
2866 frag = &skb_shinfo(skb)->frags[frag_idx];
2869 bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset,
2870 len, PCI_DMA_TODEVICE);
2871 dma_unmap_addr_set(&tx->info[idx], bus, bus);
2872 dma_unmap_len_set(&tx->info[idx], len, len);
2875 (req - rdma_count)->rdma_count = rdma_count;
2879 req->flags |= MXGEFW_FLAGS_TSO_LAST;
2880 } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
2881 MXGEFW_FLAGS_FIRST)));
2882 idx = ((count - 1) + tx->req) & tx->mask;
2883 tx->info[idx].last = 1;
2884 myri10ge_submit_req(tx, tx->req_list, count);
2885 /* if using multiple tx queues, make sure NIC polls the
2887 if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
2888 tx->queue_active = 1;
2889 put_be32(htonl(1), tx->send_go);
2894 if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
2896 netif_tx_stop_queue(netdev_queue);
2898 return NETDEV_TX_OK;
2901 /* Free any DMA resources we've alloced and clear out the skb
2902 * slot so as to not trip up assertions, and to avoid a
2903 * double-free if linearizing fails */
2905 last_idx = (idx + 1) & tx->mask;
2906 idx = tx->req & tx->mask;
2907 tx->info[idx].skb = NULL;
2909 len = dma_unmap_len(&tx->info[idx], len);
2911 if (tx->info[idx].skb != NULL)
2912 pci_unmap_single(mgp->pdev,
2913 dma_unmap_addr(&tx->info[idx],
2917 pci_unmap_page(mgp->pdev,
2918 dma_unmap_addr(&tx->info[idx],
2921 dma_unmap_len_set(&tx->info[idx], len, 0);
2922 tx->info[idx].skb = NULL;
2924 idx = (idx + 1) & tx->mask;
2925 } while (idx != last_idx);
2926 if (skb_is_gso(skb)) {
2927 netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
2931 if (skb_linearize(skb))
2938 dev_kfree_skb_any(skb);
2939 ss->stats.tx_dropped += 1;
2940 return NETDEV_TX_OK;
2944 static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
2945 struct net_device *dev)
2947 struct sk_buff *segs, *curr;
2948 struct myri10ge_priv *mgp = netdev_priv(dev);
2949 struct myri10ge_slice_state *ss;
2952 segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
2960 status = myri10ge_xmit(curr, dev);
2962 dev_kfree_skb_any(curr);
2967 dev_kfree_skb_any(segs);
2972 dev_kfree_skb_any(skb);
2973 return NETDEV_TX_OK;
2976 ss = &mgp->ss[skb_get_queue_mapping(skb)];
2977 dev_kfree_skb_any(skb);
2978 ss->stats.tx_dropped += 1;
2979 return NETDEV_TX_OK;
2982 static struct rtnl_link_stats64 *myri10ge_get_stats(struct net_device *dev,
2983 struct rtnl_link_stats64 *stats)
2985 const struct myri10ge_priv *mgp = netdev_priv(dev);
2986 const struct myri10ge_slice_netstats *slice_stats;
2989 for (i = 0; i < mgp->num_slices; i++) {
2990 slice_stats = &mgp->ss[i].stats;
2991 stats->rx_packets += slice_stats->rx_packets;
2992 stats->tx_packets += slice_stats->tx_packets;
2993 stats->rx_bytes += slice_stats->rx_bytes;
2994 stats->tx_bytes += slice_stats->tx_bytes;
2995 stats->rx_dropped += slice_stats->rx_dropped;
2996 stats->tx_dropped += slice_stats->tx_dropped;
3001 static void myri10ge_set_multicast_list(struct net_device *dev)
3003 struct myri10ge_priv *mgp = netdev_priv(dev);
3004 struct myri10ge_cmd cmd;
3005 struct netdev_hw_addr *ha;
3006 __be32 data[2] = { 0, 0 };
3009 /* can be called from atomic contexts,
3010 * pass 1 to force atomicity in myri10ge_send_cmd() */
3011 myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);
3013 /* This firmware is known to not support multicast */
3014 if (!mgp->fw_multicast_support)
3017 /* Disable multicast filtering */
3019 err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
3021 netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
3026 if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) {
3027 /* request to disable multicast filtering, so quit here */
3031 /* Flush the filters */
3033 err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
3036 netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
3041 /* Walk the multicast list, and add each address */
3042 netdev_for_each_mc_addr(ha, dev) {
3043 memcpy(data, &ha->addr, 6);
3044 cmd.data0 = ntohl(data[0]);
3045 cmd.data1 = ntohl(data[1]);
3046 err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
3050 netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
3055 /* Enable multicast filtering */
3056 err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
3058 netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
3069 static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
3071 struct sockaddr *sa = addr;
3072 struct myri10ge_priv *mgp = netdev_priv(dev);
3075 if (!is_valid_ether_addr(sa->sa_data))
3076 return -EADDRNOTAVAIL;
3078 status = myri10ge_update_mac_address(mgp, sa->sa_data);
3080 netdev_err(dev, "changing mac address failed with %d\n",
3085 /* change the dev structure */
3086 memcpy(dev->dev_addr, sa->sa_data, 6);
3090 static u32 myri10ge_fix_features(struct net_device *dev, u32 features)
3092 if (!(features & NETIF_F_RXCSUM))
3093 features &= ~NETIF_F_LRO;
3098 static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
3100 struct myri10ge_priv *mgp = netdev_priv(dev);
3103 if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
3104 netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu);
3107 netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
3109 /* if we change the mtu on an active device, we must
3110 * reset the device so the firmware sees the change */
3111 myri10ge_close(dev);
3121 * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
3122 * Only do it if the bridge is a root port since we don't want to disturb
3123 * any other device, except if forced with myri10ge_ecrc_enable > 1.
3126 static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
3128 struct pci_dev *bridge = mgp->pdev->bus->self;
3129 struct device *dev = &mgp->pdev->dev;
3136 if (!myri10ge_ecrc_enable || !bridge)
3139 /* check that the bridge is a root port */
3140 cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
3141 pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val);
3142 ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
3143 if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
3144 if (myri10ge_ecrc_enable > 1) {
3145 struct pci_dev *prev_bridge, *old_bridge = bridge;
3147 /* Walk the hierarchy up to the root port
3148 * where ECRC has to be enabled */
3150 prev_bridge = bridge;
3151 bridge = bridge->bus->self;
3152 if (!bridge || prev_bridge == bridge) {
3154 "Failed to find root port"
3155 " to force ECRC\n");
3159 pci_find_capability(bridge, PCI_CAP_ID_EXP);
3160 pci_read_config_word(bridge,
3161 cap + PCI_CAP_FLAGS, &val);
3162 ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
3163 } while (ext_type != PCI_EXP_TYPE_ROOT_PORT);
3166 "Forcing ECRC on non-root port %s"
3167 " (enabling on root port %s)\n",
3168 pci_name(old_bridge), pci_name(bridge));
3171 "Not enabling ECRC on non-root port %s\n",
3177 cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
3181 ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
3183 dev_err(dev, "failed reading ext-conf-space of %s\n",
3185 dev_err(dev, "\t pci=nommconf in use? "
3186 "or buggy/incomplete/absent ACPI MCFG attr?\n");
3189 if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
3192 err_cap |= PCI_ERR_CAP_ECRC_GENE;
3193 pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
3194 dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
3198 * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
3199 * when the PCI-E Completion packets are aligned on an 8-byte
3200 * boundary. Some PCI-E chip sets always align Completion packets; on
3201 * the ones that do not, the alignment can be enforced by enabling
3202 * ECRC generation (if supported).
3204 * When PCI-E Completion packets are not aligned, it is actually more
3205 * efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
3207 * If the driver can neither enable ECRC nor verify that it has
3208 * already been enabled, then it must use a firmware image which works
3209 * around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it
3210 * should also ensure that it never gives the device a Read-DMA which is
3211 * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is
3212 * enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat)
3213 * firmware image, and set tx_boundary to 4KB.
3216 static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
3218 struct pci_dev *pdev = mgp->pdev;
3219 struct device *dev = &pdev->dev;
3222 mgp->tx_boundary = 4096;
3224 * Verify the max read request size was set to 4KB
3225 * before trying the test with 4KB.
3227 status = pcie_get_readrq(pdev);
3229 dev_err(dev, "Couldn't read max read req size: %d\n", status);
3232 if (status != 4096) {
3233 dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
3234 mgp->tx_boundary = 2048;
3237 * load the optimized firmware (which assumes aligned PCIe
3238 * completions) in order to see if it works on this host.
3240 set_fw_name(mgp, myri10ge_fw_aligned, false);
3241 status = myri10ge_load_firmware(mgp, 1);
3247 * Enable ECRC if possible
3249 myri10ge_enable_ecrc(mgp);
3252 * Run a DMA test which watches for unaligned completions and
3253 * aborts on the first one seen.
3256 status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST);
3258 return; /* keep the aligned firmware */
3260 if (status != -E2BIG)
3261 dev_warn(dev, "DMA test failed: %d\n", status);
3262 if (status == -ENOSYS)
3263 dev_warn(dev, "Falling back to ethp! "
3264 "Please install up to date fw\n");
3266 /* fall back to using the unaligned firmware */
3267 mgp->tx_boundary = 2048;
3268 set_fw_name(mgp, myri10ge_fw_unaligned, false);
3272 static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
3276 if (myri10ge_force_firmware == 0) {
3277 int link_width, exp_cap;
3280 exp_cap = pci_find_capability(mgp->pdev, PCI_CAP_ID_EXP);
3281 pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
3282 link_width = (lnk >> 4) & 0x3f;
3284 /* Check to see if Link is less than 8 or if the
3285 * upstream bridge is known to provide aligned
3287 if (link_width < 8) {
3288 dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
3290 mgp->tx_boundary = 4096;
3291 set_fw_name(mgp, myri10ge_fw_aligned, false);
3293 myri10ge_firmware_probe(mgp);
3296 if (myri10ge_force_firmware == 1) {
3297 dev_info(&mgp->pdev->dev,
3298 "Assuming aligned completions (forced)\n");
3299 mgp->tx_boundary = 4096;
3300 set_fw_name(mgp, myri10ge_fw_aligned, false);
3302 dev_info(&mgp->pdev->dev,
3303 "Assuming unaligned completions (forced)\n");
3304 mgp->tx_boundary = 2048;
3305 set_fw_name(mgp, myri10ge_fw_unaligned, false);
3309 kparam_block_sysfs_write(myri10ge_fw_name);
3310 if (myri10ge_fw_name != NULL) {
3311 char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
3314 set_fw_name(mgp, fw_name, true);
3317 kparam_unblock_sysfs_write(myri10ge_fw_name);
3319 if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
3320 myri10ge_fw_names[mgp->board_number] != NULL &&
3321 strlen(myri10ge_fw_names[mgp->board_number])) {
3322 set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
3326 dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
3331 static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
3333 struct myri10ge_priv *mgp;
3334 struct net_device *netdev;
3336 mgp = pci_get_drvdata(pdev);
3341 netif_device_detach(netdev);
3342 if (netif_running(netdev)) {
3343 netdev_info(netdev, "closing\n");
3345 myri10ge_close(netdev);
3348 myri10ge_dummy_rdma(mgp, 0);
3349 pci_save_state(pdev);
3350 pci_disable_device(pdev);
3352 return pci_set_power_state(pdev, pci_choose_state(pdev, state));
3355 static int myri10ge_resume(struct pci_dev *pdev)
3357 struct myri10ge_priv *mgp;
3358 struct net_device *netdev;
3362 mgp = pci_get_drvdata(pdev);
3366 pci_set_power_state(pdev, 0); /* zeros conf space as a side effect */
3367 msleep(5); /* give card time to respond */
3368 pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
3369 if (vendor == 0xffff) {
3370 netdev_err(mgp->dev, "device disappeared!\n");
3374 pci_restore_state(pdev);
3376 status = pci_enable_device(pdev);
3378 dev_err(&pdev->dev, "failed to enable device\n");
3382 pci_set_master(pdev);
3384 myri10ge_reset(mgp);
3385 myri10ge_dummy_rdma(mgp, 1);
3387 /* Save configuration space to be restored if the
3388 * nic resets due to a parity error */
3389 pci_save_state(pdev);
3391 if (netif_running(netdev)) {
3393 status = myri10ge_open(netdev);
3396 goto abort_with_enabled;
3399 netif_device_attach(netdev);
3404 pci_disable_device(pdev);
3408 #endif /* CONFIG_PM */
3410 static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
3412 struct pci_dev *pdev = mgp->pdev;
3413 int vs = mgp->vendor_specific_offset;
3416 /*enter read32 mode */
3417 pci_write_config_byte(pdev, vs + 0x10, 0x3);
3419 /*read REBOOT_STATUS (0xfffffff0) */
3420 pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
3421 pci_read_config_dword(pdev, vs + 0x14, &reboot);
3426 * This watchdog is used to check whether the board has suffered
3427 * from a parity error and needs to be recovered.
3429 static void myri10ge_watchdog(struct work_struct *work)
3431 struct myri10ge_priv *mgp =
3432 container_of(work, struct myri10ge_priv, watchdog_work);
3433 struct myri10ge_tx_buf *tx;
3435 int status, rebooted;
3439 mgp->watchdog_resets++;
3440 pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
3442 if ((cmd & PCI_COMMAND_MASTER) == 0) {
3443 /* Bus master DMA disabled? Check to see
3444 * if the card rebooted due to a parity error
3445 * For now, just report it */
3446 reboot = myri10ge_read_reboot(mgp);
3447 netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
3449 myri10ge_reset_recover ? "" : " not");
3450 if (myri10ge_reset_recover == 0)
3455 myri10ge_close(mgp->dev);
3456 myri10ge_reset_recover--;
3459 * A rebooted nic will come back with config space as
3460 * it was after power was applied to PCIe bus.
3461 * Attempt to restore config space which was saved
3462 * when the driver was loaded, or the last time the
3463 * nic was resumed from power saving mode.
3465 pci_restore_state(mgp->pdev);
3467 /* save state again for accounting reasons */
3468 pci_save_state(mgp->pdev);
3471 /* if we get back -1's from our slot, perhaps somebody
3472 * powered off our card. Don't try to reset it in
3474 if (cmd == 0xffff) {
3475 pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
3476 if (vendor == 0xffff) {
3477 netdev_err(mgp->dev, "device disappeared!\n");
3481 /* Perhaps it is a software error. Try to reset */
3483 netdev_err(mgp->dev, "device timeout, resetting\n");
3484 for (i = 0; i < mgp->num_slices; i++) {
3485 tx = &mgp->ss[i].tx;
3486 netdev_err(mgp->dev, "(%d): %d %d %d %d %d %d\n",
3487 i, tx->queue_active, tx->req,
3488 tx->done, tx->pkt_start, tx->pkt_done,
3489 (int)ntohl(mgp->ss[i].fw_stats->
3492 netdev_info(mgp->dev, "(%d): %d %d %d %d %d %d\n",
3493 i, tx->queue_active, tx->req,
3494 tx->done, tx->pkt_start, tx->pkt_done,
3495 (int)ntohl(mgp->ss[i].fw_stats->
3502 myri10ge_close(mgp->dev);
3504 status = myri10ge_load_firmware(mgp, 1);
3506 netdev_err(mgp->dev, "failed to load firmware\n");
3508 myri10ge_open(mgp->dev);
3513 * We use our own timer routine rather than relying upon
3514 * netdev->tx_timeout because we have a very large hardware transmit
3515 * queue. Due to the large queue, the netdev->tx_timeout function
3516 * cannot detect a NIC with a parity error in a timely fashion if the
3517 * NIC is lightly loaded.
3519 static void myri10ge_watchdog_timer(unsigned long arg)
3521 struct myri10ge_priv *mgp;
3522 struct myri10ge_slice_state *ss;
3523 int i, reset_needed, busy_slice_cnt;
3527 mgp = (struct myri10ge_priv *)arg;
3529 rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
3531 for (i = 0, reset_needed = 0;
3532 i < mgp->num_slices && reset_needed == 0; ++i) {
3535 if (ss->rx_small.watchdog_needed) {
3536 myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
3537 mgp->small_bytes + MXGEFW_PAD,
3539 if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
3540 myri10ge_fill_thresh)
3541 ss->rx_small.watchdog_needed = 0;
3543 if (ss->rx_big.watchdog_needed) {
3544 myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
3546 if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
3547 myri10ge_fill_thresh)
3548 ss->rx_big.watchdog_needed = 0;
3551 if (ss->tx.req != ss->tx.done &&
3552 ss->tx.done == ss->watchdog_tx_done &&
3553 ss->watchdog_tx_req != ss->watchdog_tx_done) {
3554 /* nic seems like it might be stuck.. */
3555 if (rx_pause_cnt != mgp->watchdog_pause) {
3556 if (net_ratelimit())
3557 netdev_err(mgp->dev, "slice %d: TX paused, check link partner\n",
3560 netdev_warn(mgp->dev, "slice %d stuck:", i);
3564 if (ss->watchdog_tx_done != ss->tx.done ||
3565 ss->watchdog_rx_done != ss->rx_done.cnt) {
3568 ss->watchdog_tx_done = ss->tx.done;
3569 ss->watchdog_tx_req = ss->tx.req;
3570 ss->watchdog_rx_done = ss->rx_done.cnt;
3572 /* if we've sent or received no traffic, poll the NIC to
3573 * ensure it is still there. Otherwise, we risk not noticing
3574 * an error in a timely fashion */
3575 if (busy_slice_cnt == 0) {
3576 pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
3577 if ((cmd & PCI_COMMAND_MASTER) == 0) {
3581 mgp->watchdog_pause = rx_pause_cnt;
3584 schedule_work(&mgp->watchdog_work);
3587 mod_timer(&mgp->watchdog_timer,
3588 jiffies + myri10ge_watchdog_timeout * HZ);
3592 static void myri10ge_free_slices(struct myri10ge_priv *mgp)
3594 struct myri10ge_slice_state *ss;
3595 struct pci_dev *pdev = mgp->pdev;
3599 if (mgp->ss == NULL)
3602 for (i = 0; i < mgp->num_slices; i++) {
3604 if (ss->rx_done.entry != NULL) {
3605 bytes = mgp->max_intr_slots *
3606 sizeof(*ss->rx_done.entry);
3607 dma_free_coherent(&pdev->dev, bytes,
3608 ss->rx_done.entry, ss->rx_done.bus);
3609 ss->rx_done.entry = NULL;
3611 if (ss->fw_stats != NULL) {
3612 bytes = sizeof(*ss->fw_stats);
3613 dma_free_coherent(&pdev->dev, bytes,
3614 ss->fw_stats, ss->fw_stats_bus);
3615 ss->fw_stats = NULL;
3616 netif_napi_del(&ss->napi);
3623 static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
3625 struct myri10ge_slice_state *ss;
3626 struct pci_dev *pdev = mgp->pdev;
3630 bytes = sizeof(*mgp->ss) * mgp->num_slices;
3631 mgp->ss = kzalloc(bytes, GFP_KERNEL);
3632 if (mgp->ss == NULL) {
3636 for (i = 0; i < mgp->num_slices; i++) {
3638 bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
3639 ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
3642 if (ss->rx_done.entry == NULL)
3644 memset(ss->rx_done.entry, 0, bytes);
3645 bytes = sizeof(*ss->fw_stats);
3646 ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
3649 if (ss->fw_stats == NULL)
3653 netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
3654 myri10ge_napi_weight);
3658 myri10ge_free_slices(mgp);
3663 * This function determines the number of slices supported.
3664 * The number slices is the minimum of the number of CPUS,
3665 * the number of MSI-X irqs supported, the number of slices
3666 * supported by the firmware
3668 static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
3670 struct myri10ge_cmd cmd;
3671 struct pci_dev *pdev = mgp->pdev;
3674 int i, status, ncpus, msix_cap;
3676 mgp->num_slices = 1;
3677 msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
3678 ncpus = num_online_cpus();
3680 if (myri10ge_max_slices == 1 || msix_cap == 0 ||
3681 (myri10ge_max_slices == -1 && ncpus < 2))
3684 /* try to load the slice aware rss firmware */
3685 old_fw = mgp->fw_name;
3686 old_allocated = mgp->fw_name_allocated;
3687 /* don't free old_fw if we override it. */
3688 mgp->fw_name_allocated = false;
3690 if (myri10ge_fw_name != NULL) {
3691 dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
3693 set_fw_name(mgp, myri10ge_fw_name, false);
3694 } else if (old_fw == myri10ge_fw_aligned)
3695 set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
3697 set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
3698 status = myri10ge_load_firmware(mgp, 0);
3700 dev_info(&pdev->dev, "Rss firmware not found\n");
3706 /* hit the board with a reset to ensure it is alive */
3707 memset(&cmd, 0, sizeof(cmd));
3708 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
3710 dev_err(&mgp->pdev->dev, "failed reset\n");
3714 mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);
3716 /* tell it the size of the interrupt queues */
3717 cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot);
3718 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
3720 dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
3724 /* ask the maximum number of slices it supports */
3725 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0);
3729 mgp->num_slices = cmd.data0;
3731 /* Only allow multiple slices if MSI-X is usable */
3732 if (!myri10ge_msi) {
3736 /* if the admin did not specify a limit to how many
3737 * slices we should use, cap it automatically to the
3738 * number of CPUs currently online */
3739 if (myri10ge_max_slices == -1)
3740 myri10ge_max_slices = ncpus;
3742 if (mgp->num_slices > myri10ge_max_slices)
3743 mgp->num_slices = myri10ge_max_slices;
3745 /* Now try to allocate as many MSI-X vectors as we have
3746 * slices. We give up on MSI-X if we can only get a single
3749 mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
3751 if (mgp->msix_vectors == NULL)
3753 for (i = 0; i < mgp->num_slices; i++) {
3754 mgp->msix_vectors[i].entry = i;
3757 while (mgp->num_slices > 1) {
3758 /* make sure it is a power of two */
3759 while (!is_power_of_2(mgp->num_slices))
3761 if (mgp->num_slices == 1)
3763 status = pci_enable_msix(pdev, mgp->msix_vectors,
3766 pci_disable_msix(pdev);
3772 mgp->num_slices = status;
3778 if (mgp->msix_vectors != NULL) {
3779 kfree(mgp->msix_vectors);
3780 mgp->msix_vectors = NULL;
3784 mgp->num_slices = 1;
3785 set_fw_name(mgp, old_fw, old_allocated);
3786 myri10ge_load_firmware(mgp, 0);
3789 static const struct net_device_ops myri10ge_netdev_ops = {
3790 .ndo_open = myri10ge_open,
3791 .ndo_stop = myri10ge_close,
3792 .ndo_start_xmit = myri10ge_xmit,
3793 .ndo_get_stats64 = myri10ge_get_stats,
3794 .ndo_validate_addr = eth_validate_addr,
3795 .ndo_change_mtu = myri10ge_change_mtu,
3796 .ndo_fix_features = myri10ge_fix_features,
3797 .ndo_set_multicast_list = myri10ge_set_multicast_list,
3798 .ndo_set_mac_address = myri10ge_set_mac_address,
3801 static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3803 struct net_device *netdev;
3804 struct myri10ge_priv *mgp;
3805 struct device *dev = &pdev->dev;
3807 int status = -ENXIO;
3809 unsigned hdr_offset, ss_offset;
3810 static int board_number;
3812 netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
3813 if (netdev == NULL) {
3814 dev_err(dev, "Could not allocate ethernet device\n");
3818 SET_NETDEV_DEV(netdev, &pdev->dev);
3820 mgp = netdev_priv(netdev);
3823 mgp->pause = myri10ge_flow_control;
3824 mgp->intr_coal_delay = myri10ge_intr_coal_delay;
3825 mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
3826 mgp->board_number = board_number;
3827 init_waitqueue_head(&mgp->down_wq);
3829 if (pci_enable_device(pdev)) {
3830 dev_err(&pdev->dev, "pci_enable_device call failed\n");
3832 goto abort_with_netdev;
3835 /* Find the vendor-specific cap so we can check
3836 * the reboot register later on */
3837 mgp->vendor_specific_offset
3838 = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
3840 /* Set our max read request to 4KB */
3841 status = pcie_set_readrq(pdev, 4096);
3843 dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
3845 goto abort_with_enabled;
3848 pci_set_master(pdev);
3850 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
3854 "64-bit pci address mask was refused, "
3856 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3859 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
3860 goto abort_with_enabled;
3862 (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
3863 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
3864 &mgp->cmd_bus, GFP_KERNEL);
3865 if (mgp->cmd == NULL)
3866 goto abort_with_enabled;
3868 mgp->board_span = pci_resource_len(pdev, 0);
3869 mgp->iomem_base = pci_resource_start(pdev, 0);
3871 mgp->wc_enabled = 0;
3873 mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
3874 MTRR_TYPE_WRCOMB, 1);
3876 mgp->wc_enabled = 1;
3878 mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
3879 if (mgp->sram == NULL) {
3880 dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
3881 mgp->board_span, mgp->iomem_base);
3883 goto abort_with_mtrr;
3886 ntohl(__raw_readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
3887 ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
3888 mgp->sram_size = ntohl(__raw_readl(mgp->sram + ss_offset));
3889 if (mgp->sram_size > mgp->board_span ||
3890 mgp->sram_size <= MYRI10GE_FW_OFFSET) {
3892 "invalid sram_size %dB or board span %ldB\n",
3893 mgp->sram_size, mgp->board_span);
3894 goto abort_with_ioremap;
3896 memcpy_fromio(mgp->eeprom_strings,
3897 mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE);
3898 memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
3899 status = myri10ge_read_mac_addr(mgp);
3901 goto abort_with_ioremap;
3903 for (i = 0; i < ETH_ALEN; i++)
3904 netdev->dev_addr[i] = mgp->mac_addr[i];
3906 myri10ge_select_firmware(mgp);
3908 status = myri10ge_load_firmware(mgp, 1);
3910 dev_err(&pdev->dev, "failed to load firmware\n");
3911 goto abort_with_ioremap;
3913 myri10ge_probe_slices(mgp);
3914 status = myri10ge_alloc_slices(mgp);
3916 dev_err(&pdev->dev, "failed to alloc slice state\n");
3917 goto abort_with_firmware;
3919 netif_set_real_num_tx_queues(netdev, mgp->num_slices);
3920 netif_set_real_num_rx_queues(netdev, mgp->num_slices);
3921 status = myri10ge_reset(mgp);
3923 dev_err(&pdev->dev, "failed reset\n");
3924 goto abort_with_slices;
3926 #ifdef CONFIG_MYRI10GE_DCA
3927 myri10ge_setup_dca(mgp);
3929 pci_set_drvdata(pdev, mgp);
3930 if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
3931 myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
3932 if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
3933 myri10ge_initial_mtu = 68;
3935 netdev->netdev_ops = &myri10ge_netdev_ops;
3936 netdev->mtu = myri10ge_initial_mtu;
3937 netdev->base_addr = mgp->iomem_base;
3938 netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM;
3939 netdev->features = netdev->hw_features;
3942 netdev->features |= NETIF_F_HIGHDMA;
3944 netdev->vlan_features |= mgp->features;
3945 if (mgp->fw_ver_tiny < 37)
3946 netdev->vlan_features &= ~NETIF_F_TSO6;
3947 if (mgp->fw_ver_tiny < 32)
3948 netdev->vlan_features &= ~NETIF_F_TSO;
3950 /* make sure we can get an irq, and that MSI can be
3951 * setup (if available). Also ensure netdev->irq
3952 * is set to correct value if MSI is enabled */
3953 status = myri10ge_request_irq(mgp);
3955 goto abort_with_firmware;
3956 netdev->irq = pdev->irq;
3957 myri10ge_free_irq(mgp);
3959 /* Save configuration space to be restored if the
3960 * nic resets due to a parity error */
3961 pci_save_state(pdev);
3963 /* Setup the watchdog timer */
3964 setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
3965 (unsigned long)mgp);
3967 SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
3968 INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
3969 status = register_netdev(netdev);
3971 dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
3972 goto abort_with_state;
3974 if (mgp->msix_enabled)
3975 dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n",
3976 mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
3977 (mgp->wc_enabled ? "Enabled" : "Disabled"));
3979 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
3980 mgp->msi_enabled ? "MSI" : "xPIC",
3981 netdev->irq, mgp->tx_boundary, mgp->fw_name,
3982 (mgp->wc_enabled ? "Enabled" : "Disabled"));
3988 pci_restore_state(pdev);
3991 myri10ge_free_slices(mgp);
3993 abort_with_firmware:
3994 myri10ge_dummy_rdma(mgp, 0);
3997 if (mgp->mac_addr_string != NULL)
3999 "myri10ge_probe() failed: MAC=%s, SN=%ld\n",
4000 mgp->mac_addr_string, mgp->serial_number);
4006 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
4008 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
4009 mgp->cmd, mgp->cmd_bus);
4012 pci_disable_device(pdev);
4015 set_fw_name(mgp, NULL, false);
4016 free_netdev(netdev);
4023 * Does what is necessary to shutdown one Myrinet device. Called
4024 * once for each Myrinet card by the kernel when a module is
4027 static void myri10ge_remove(struct pci_dev *pdev)
4029 struct myri10ge_priv *mgp;
4030 struct net_device *netdev;
4032 mgp = pci_get_drvdata(pdev);
4036 cancel_work_sync(&mgp->watchdog_work);
4038 unregister_netdev(netdev);
4040 #ifdef CONFIG_MYRI10GE_DCA
4041 myri10ge_teardown_dca(mgp);
4043 myri10ge_dummy_rdma(mgp, 0);
4045 /* avoid a memory leak */
4046 pci_restore_state(pdev);
4052 mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
4054 myri10ge_free_slices(mgp);
4055 if (mgp->msix_vectors != NULL)
4056 kfree(mgp->msix_vectors);
4057 dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
4058 mgp->cmd, mgp->cmd_bus);
4060 set_fw_name(mgp, NULL, false);
4061 free_netdev(netdev);
4062 pci_disable_device(pdev);
4063 pci_set_drvdata(pdev, NULL);
4066 #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
4067 #define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
4069 static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
4070 {PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
4072 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
4076 MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);
4078 static struct pci_driver myri10ge_driver = {
4080 .probe = myri10ge_probe,
4081 .remove = myri10ge_remove,
4082 .id_table = myri10ge_pci_tbl,
4084 .suspend = myri10ge_suspend,
4085 .resume = myri10ge_resume,
4089 #ifdef CONFIG_MYRI10GE_DCA
4091 myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
4093 int err = driver_for_each_device(&myri10ge_driver.driver,
4095 myri10ge_notify_dca_device);
4102 static struct notifier_block myri10ge_dca_notifier = {
4103 .notifier_call = myri10ge_notify_dca,
4107 #endif /* CONFIG_MYRI10GE_DCA */
4109 static __init int myri10ge_init_module(void)
4111 pr_info("Version %s\n", MYRI10GE_VERSION_STR);
4113 if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
4114 pr_err("Illegal rssh hash type %d, defaulting to source port\n",
4116 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
4118 #ifdef CONFIG_MYRI10GE_DCA
4119 dca_register_notify(&myri10ge_dca_notifier);
4121 if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
4122 myri10ge_max_slices = MYRI10GE_MAX_SLICES;
4124 return pci_register_driver(&myri10ge_driver);
4127 module_init(myri10ge_init_module);
4129 static __exit void myri10ge_cleanup_module(void)
4131 #ifdef CONFIG_MYRI10GE_DCA
4132 dca_unregister_notify(&myri10ge_dca_notifier);
4134 pci_unregister_driver(&myri10ge_driver);
4137 module_exit(myri10ge_cleanup_module);