1 /*******************************************************************************
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2011 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *******************************************************************************/
28 #include <linux/types.h>
29 #include <linux/module.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/vmalloc.h>
33 #include <linux/string.h>
35 #include <linux/interrupt.h>
37 #include <linux/tcp.h>
38 #include <linux/sctp.h>
39 #include <linux/pkt_sched.h>
40 #include <linux/ipv6.h>
41 #include <linux/slab.h>
42 #include <net/checksum.h>
43 #include <net/ip6_checksum.h>
44 #include <linux/ethtool.h>
46 #include <linux/if_vlan.h>
47 #include <linux/prefetch.h>
48 #include <scsi/fc/fc_fcoe.h>
51 #include "ixgbe_common.h"
52 #include "ixgbe_dcb_82599.h"
53 #include "ixgbe_sriov.h"
55 char ixgbe_driver_name[] = "ixgbe";
56 static const char ixgbe_driver_string[] =
57 "Intel(R) 10 Gigabit PCI Express Network Driver";
61 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
62 __stringify(BUILD) "-k"
63 const char ixgbe_driver_version[] = DRV_VERSION;
64 static const char ixgbe_copyright[] =
65 "Copyright (c) 1999-2011 Intel Corporation.";
67 static const struct ixgbe_info *ixgbe_info_tbl[] = {
68 [board_82598] = &ixgbe_82598_info,
69 [board_82599] = &ixgbe_82599_info,
70 [board_X540] = &ixgbe_X540_info,
73 /* ixgbe_pci_tbl - PCI Device ID Table
75 * Wildcard entries (PCI_ANY_ID) should come last
76 * Last entry must be all 0s
78 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
79 * Class, Class Mask, private data (not used) }
81 static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
82 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 },
83 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 },
84 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 },
85 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 },
86 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 },
87 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 },
88 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 },
89 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 },
90 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 },
91 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 },
92 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 },
93 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 },
94 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), board_82599 },
95 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 },
96 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 },
97 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 },
98 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 },
99 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 },
100 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 },
101 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 },
102 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 },
103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 },
104 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 },
105 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
106 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
107 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
108 /* required last entry */
111 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
113 #ifdef CONFIG_IXGBE_DCA
114 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
116 static struct notifier_block dca_notifier = {
117 .notifier_call = ixgbe_notify_dca,
123 #ifdef CONFIG_PCI_IOV
124 static unsigned int max_vfs;
125 module_param(max_vfs, uint, 0);
126 MODULE_PARM_DESC(max_vfs,
127 "Maximum number of virtual functions to allocate per physical function");
128 #endif /* CONFIG_PCI_IOV */
130 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
131 MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
132 MODULE_LICENSE("GPL");
133 MODULE_VERSION(DRV_VERSION);
135 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
137 static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
139 struct ixgbe_hw *hw = &adapter->hw;
144 #ifdef CONFIG_PCI_IOV
145 /* disable iov and allow time for transactions to clear */
146 pci_disable_sriov(adapter->pdev);
149 /* turn off device IOV mode */
150 gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
151 gcr &= ~(IXGBE_GCR_EXT_SRIOV);
152 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
153 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
154 gpie &= ~IXGBE_GPIE_VTMODE_MASK;
155 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
157 /* set default pool back to 0 */
158 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
159 vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
160 IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
161 IXGBE_WRITE_FLUSH(hw);
163 /* take a breather then clean up driver data */
166 kfree(adapter->vfinfo);
167 adapter->vfinfo = NULL;
169 adapter->num_vfs = 0;
170 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
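/* Schedule the service task, unless the adapter is going down or the
 * task has already been scheduled.
 */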
173 static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
175 if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
176 !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
177 schedule_work(&adapter->service_task);
180 static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
182 BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
/* flush memory to make sure state is correct before next watchdog */
185 smp_mb__before_clear_bit();
186 clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
189 struct ixgbe_reg_info {
194 static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
196 /* General Registers */
197 {IXGBE_CTRL, "CTRL"},
198 {IXGBE_STATUS, "STATUS"},
199 {IXGBE_CTRL_EXT, "CTRL_EXT"},
201 /* Interrupt Registers */
202 {IXGBE_EICR, "EICR"},
205 {IXGBE_SRRCTL(0), "SRRCTL"},
206 {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
207 {IXGBE_RDLEN(0), "RDLEN"},
208 {IXGBE_RDH(0), "RDH"},
209 {IXGBE_RDT(0), "RDT"},
210 {IXGBE_RXDCTL(0), "RXDCTL"},
211 {IXGBE_RDBAL(0), "RDBAL"},
212 {IXGBE_RDBAH(0), "RDBAH"},
215 {IXGBE_TDBAL(0), "TDBAL"},
216 {IXGBE_TDBAH(0), "TDBAH"},
217 {IXGBE_TDLEN(0), "TDLEN"},
218 {IXGBE_TDH(0), "TDH"},
219 {IXGBE_TDT(0), "TDT"},
220 {IXGBE_TXDCTL(0), "TXDCTL"},
222 /* List Terminator */
228 * ixgbe_regdump - register printout routine
230 static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
236 switch (reginfo->ofs) {
237 case IXGBE_SRRCTL(0):
238 for (i = 0; i < 64; i++)
239 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
241 case IXGBE_DCA_RXCTRL(0):
242 for (i = 0; i < 64; i++)
243 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
246 for (i = 0; i < 64; i++)
247 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
250 for (i = 0; i < 64; i++)
251 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
254 for (i = 0; i < 64; i++)
255 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
257 case IXGBE_RXDCTL(0):
258 for (i = 0; i < 64; i++)
259 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
262 for (i = 0; i < 64; i++)
263 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
266 for (i = 0; i < 64; i++)
267 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
270 for (i = 0; i < 64; i++)
271 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
274 for (i = 0; i < 64; i++)
275 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
278 for (i = 0; i < 64; i++)
279 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
282 for (i = 0; i < 64; i++)
283 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
286 for (i = 0; i < 64; i++)
287 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
289 case IXGBE_TXDCTL(0):
290 for (i = 0; i < 64; i++)
291 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
294 pr_info("%-15s %08x\n", reginfo->name,
295 IXGBE_READ_REG(hw, reginfo->ofs));
299 for (i = 0; i < 8; i++) {
300 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
301 pr_err("%-15s", rname);
302 for (j = 0; j < 8; j++)
303 pr_cont(" %08x", regs[i*8+j]);
310 * ixgbe_dump - Print registers, tx-rings and rx-rings
312 static void ixgbe_dump(struct ixgbe_adapter *adapter)
314 struct net_device *netdev = adapter->netdev;
315 struct ixgbe_hw *hw = &adapter->hw;
316 struct ixgbe_reg_info *reginfo;
318 struct ixgbe_ring *tx_ring;
319 struct ixgbe_tx_buffer *tx_buffer_info;
320 union ixgbe_adv_tx_desc *tx_desc;
321 struct my_u0 { u64 a; u64 b; } *u0;
322 struct ixgbe_ring *rx_ring;
323 union ixgbe_adv_rx_desc *rx_desc;
324 struct ixgbe_rx_buffer *rx_buffer_info;
328 if (!netif_msg_hw(adapter))
331 /* Print netdevice Info */
333 dev_info(&adapter->pdev->dev, "Net device Info\n");
334 pr_info("Device Name state "
335 "trans_start last_rx\n");
336 pr_info("%-15s %016lX %016lX %016lX\n",
343 /* Print Registers */
344 dev_info(&adapter->pdev->dev, "Register Dump\n");
345 pr_info(" Register Name Value\n");
346 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
347 reginfo->name; reginfo++) {
348 ixgbe_regdump(hw, reginfo);
351 /* Print TX Ring Summary */
352 if (!netdev || !netif_running(netdev))
355 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
356 pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
357 for (n = 0; n < adapter->num_tx_queues; n++) {
358 tx_ring = adapter->tx_ring[n];
360 &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
361 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
362 n, tx_ring->next_to_use, tx_ring->next_to_clean,
363 (u64)tx_buffer_info->dma,
364 tx_buffer_info->length,
365 tx_buffer_info->next_to_watch,
366 (u64)tx_buffer_info->time_stamp);
370 if (!netif_msg_tx_done(adapter))
371 goto rx_ring_summary;
373 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
375 /* Transmit Descriptor Formats
377 * Advanced Transmit Descriptor
378 * +--------------------------------------------------------------+
379 * 0 | Buffer Address [63:0] |
380 * +--------------------------------------------------------------+
381 * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
382 * +--------------------------------------------------------------+
383 * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
386 for (n = 0; n < adapter->num_tx_queues; n++) {
387 tx_ring = adapter->tx_ring[n];
388 pr_info("------------------------------------\n");
389 pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
390 pr_info("------------------------------------\n");
391 pr_info("T [desc] [address 63:0 ] "
392 "[PlPOIdStDDt Ln] [bi->dma ] "
393 "leng ntw timestamp bi->skb\n");
395 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
396 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
397 tx_buffer_info = &tx_ring->tx_buffer_info[i];
398 u0 = (struct my_u0 *)tx_desc;
399 pr_info("T [0x%03X] %016llX %016llX %016llX"
400 " %04X %p %016llX %p", i,
403 (u64)tx_buffer_info->dma,
404 tx_buffer_info->length,
405 tx_buffer_info->next_to_watch,
406 (u64)tx_buffer_info->time_stamp,
407 tx_buffer_info->skb);
408 if (i == tx_ring->next_to_use &&
409 i == tx_ring->next_to_clean)
411 else if (i == tx_ring->next_to_use)
413 else if (i == tx_ring->next_to_clean)
418 if (netif_msg_pktdata(adapter) &&
419 tx_buffer_info->dma != 0)
420 print_hex_dump(KERN_INFO, "",
421 DUMP_PREFIX_ADDRESS, 16, 1,
422 phys_to_virt(tx_buffer_info->dma),
423 tx_buffer_info->length, true);
427 /* Print RX Rings Summary */
429 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
430 pr_info("Queue [NTU] [NTC]\n");
431 for (n = 0; n < adapter->num_rx_queues; n++) {
432 rx_ring = adapter->rx_ring[n];
433 pr_info("%5d %5X %5X\n",
434 n, rx_ring->next_to_use, rx_ring->next_to_clean);
438 if (!netif_msg_rx_status(adapter))
441 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
443 /* Advanced Receive Descriptor (Read) Format
445 * +-----------------------------------------------------+
446 * 0 | Packet Buffer Address [63:1] |A0/NSE|
447 * +----------------------------------------------+------+
448 * 8 | Header Buffer Address [63:1] | DD |
449 * +-----------------------------------------------------+
452 * Advanced Receive Descriptor (Write-Back) Format
454 * 63 48 47 32 31 30 21 20 16 15 4 3 0
455 * +------------------------------------------------------+
456 * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
457 * | Checksum Ident | | | | Type | Type |
458 * +------------------------------------------------------+
459 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
460 * +------------------------------------------------------+
461 * 63 48 47 32 31 20 19 0
463 for (n = 0; n < adapter->num_rx_queues; n++) {
464 rx_ring = adapter->rx_ring[n];
465 pr_info("------------------------------------\n");
466 pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
467 pr_info("------------------------------------\n");
468 pr_info("R [desc] [ PktBuf A0] "
469 "[ HeadBuf DD] [bi->dma ] [bi->skb] "
470 "<-- Adv Rx Read format\n");
471 pr_info("RWB[desc] [PcsmIpSHl PtRs] "
472 "[vl er S cks ln] ---------------- [bi->skb] "
473 "<-- Adv Rx Write-Back format\n");
475 for (i = 0; i < rx_ring->count; i++) {
476 rx_buffer_info = &rx_ring->rx_buffer_info[i];
477 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
478 u0 = (struct my_u0 *)rx_desc;
479 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
480 if (staterr & IXGBE_RXD_STAT_DD) {
481 /* Descriptor Done */
482 pr_info("RWB[0x%03X] %016llX "
483 "%016llX ---------------- %p", i,
486 rx_buffer_info->skb);
488 pr_info("R [0x%03X] %016llX "
489 "%016llX %016llX %p", i,
492 (u64)rx_buffer_info->dma,
493 rx_buffer_info->skb);
495 if (netif_msg_pktdata(adapter)) {
496 print_hex_dump(KERN_INFO, "",
497 DUMP_PREFIX_ADDRESS, 16, 1,
498 phys_to_virt(rx_buffer_info->dma),
499 rx_ring->rx_buf_len, true);
501 if (rx_ring->rx_buf_len
502 < IXGBE_RXBUFFER_2048)
503 print_hex_dump(KERN_INFO, "",
504 DUMP_PREFIX_ADDRESS, 16, 1,
506 rx_buffer_info->page_dma +
507 rx_buffer_info->page_offset
513 if (i == rx_ring->next_to_use)
515 else if (i == rx_ring->next_to_clean)
527 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
531 /* Let firmware take over control of h/w */
532 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
533 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
534 ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD);
537 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter)
541 /* Let firmware know the driver has taken over */
542 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
543 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT,
544 ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD);
548 * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
549 * @adapter: pointer to adapter struct
550 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
551 * @queue: queue to map the corresponding interrupt to
552 * @msix_vector: the vector to map to the corresponding queue
555 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
556 u8 queue, u8 msix_vector)
559 struct ixgbe_hw *hw = &adapter->hw;
560 switch (hw->mac.type) {
561 case ixgbe_mac_82598EB:
562 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
565 index = (((direction * 64) + queue) >> 2) & 0x1F;
566 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
567 ivar &= ~(0xFF << (8 * (queue & 0x3)));
568 ivar |= (msix_vector << (8 * (queue & 0x3)));
569 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
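/* For example, with direction = 0 (Rx) and queue = 5 on 82598:
 * index = (((0 * 64) + 5) >> 2) & 0x1F = 1, and the entry occupies
 * byte (5 & 0x3) = 1 of IVAR(1).
 */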
571 case ixgbe_mac_82599EB:
573 if (direction == -1) {
575 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
576 index = ((queue & 1) * 8);
577 ivar = IXGBE_READ_REG(&adapter->hw, IXGBE_IVAR_MISC);
578 ivar &= ~(0xFF << index);
579 ivar |= (msix_vector << index);
580 IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR_MISC, ivar);
583 /* tx or rx causes */
584 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
585 index = ((16 * (queue & 1)) + (8 * direction));
586 ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
587 ivar &= ~(0xFF << index);
588 ivar |= (msix_vector << index);
589 IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), ivar);
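/* Rearm the queues in qmask by writing their bits to the EICS
 * register(s), which causes the corresponding interrupts to fire again.
 */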
597 static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
602 switch (adapter->hw.mac.type) {
603 case ixgbe_mac_82598EB:
604 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
605 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
607 case ixgbe_mac_82599EB:
609 mask = (qmask & 0xFFFFFFFF);
610 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
611 mask = (qmask >> 32);
612 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
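/* Unmap the DMA buffer behind a Tx buffer, using the page or the
 * single-buffer unmap call depending on how it was mapped.
 */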
619 static inline void ixgbe_unmap_tx_resource(struct ixgbe_ring *ring,
620 struct ixgbe_tx_buffer *tx_buffer)
622 if (tx_buffer->dma) {
623 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE)
624 dma_unmap_page(ring->dev,
629 dma_unmap_single(ring->dev,
637 void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
638 struct ixgbe_tx_buffer *tx_buffer_info)
640 ixgbe_unmap_tx_resource(tx_ring, tx_buffer_info);
641 if (tx_buffer_info->skb)
642 dev_kfree_skb_any(tx_buffer_info->skb);
643 tx_buffer_info->skb = NULL;
644 /* tx_buffer_info must be completely set up in the transmit path */
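/* Count XOFF (pause) frames received, both link-wide and per traffic
 * class, and disarm the Tx hang check on queues that were paused.
 */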
647 static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
649 struct ixgbe_hw *hw = &adapter->hw;
650 struct ixgbe_hw_stats *hwstats = &adapter->stats;
655 if ((hw->fc.current_mode == ixgbe_fc_full) ||
656 (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
657 switch (hw->mac.type) {
658 case ixgbe_mac_82598EB:
659 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
662 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
664 hwstats->lxoffrxc += data;
666 /* refill credits (no tx hang) if we received xoff */
670 for (i = 0; i < adapter->num_tx_queues; i++)
671 clear_bit(__IXGBE_HANG_CHECK_ARMED,
672 &adapter->tx_ring[i]->state);
674 } else if (!(adapter->dcb_cfg.pfc_mode_enable))
677 /* update stats for each tc, only valid with PFC enabled */
678 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
679 switch (hw->mac.type) {
680 case ixgbe_mac_82598EB:
681 xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
684 xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
686 hwstats->pxoffrxc[i] += xoff[i];
689 /* disarm tx queues that have received xoff frames */
690 for (i = 0; i < adapter->num_tx_queues; i++) {
691 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
692 u8 tc = tx_ring->dcb_tc;
695 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
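/* Helpers for the Tx hang check: how many packets the driver has
 * completed versus how many descriptors the hardware still has
 * outstanding (derived from the TDH/TDT head and tail registers).
 */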
699 static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
701 return ring->tx_stats.completed;
704 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
706 struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
707 struct ixgbe_hw *hw = &adapter->hw;
709 u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
710 u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
713 return (head < tail) ?
714 tail - head : (tail + ring->count - head);
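/* Example: with count = 512, head = 500 and tail = 10, the tail has
 * wrapped, so pending = 10 + 512 - 500 = 22 descriptors.
 */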
719 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
721 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
722 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
723 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
726 clear_check_for_tx_hang(tx_ring);
729 * Check for a hung queue, but be thorough. This verifies
730 * that a transmit has been completed since the previous
731 * check AND there is at least one packet pending. The
732 * ARMED bit is set to indicate a potential hang. The
733 * bit is cleared if a pause frame is received to remove
734 * false hang detection due to PFC or 802.3x frames. By
735 * requiring this to fail twice we avoid races with
736 * pfc clearing the ARMED bit and conditions where we
737 * run the check_tx_hang logic with a transmit completion
738 * pending but without time to complete it yet.
740 if ((tx_done_old == tx_done) && tx_pending) {
741 /* make sure it is true for two checks in a row */
742 ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
745 /* update completed stats and continue */
746 tx_ring->tx_stats.tx_done_old = tx_done;
747 /* reset the countdown */
748 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
755 * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
756 * @adapter: driver private struct
758 static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
761 /* Do the reset outside of interrupt context */
762 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
763 adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
764 ixgbe_service_event_schedule(adapter);
769 * ixgbe_clean_tx_irq - Reclaim resources after transmit completes
770 * @q_vector: structure containing interrupt and ring information
771 * @tx_ring: tx ring to clean
773 static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
774 struct ixgbe_ring *tx_ring)
776 struct ixgbe_adapter *adapter = q_vector->adapter;
777 struct ixgbe_tx_buffer *tx_buffer;
778 union ixgbe_adv_tx_desc *tx_desc;
779 unsigned int total_bytes = 0, total_packets = 0;
780 unsigned int budget = q_vector->tx.work_limit;
781 u16 i = tx_ring->next_to_clean;
783 tx_buffer = &tx_ring->tx_buffer_info[i];
784 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
786 for (; budget; budget--) {
787 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
789 /* if next_to_watch is not set then there is no work pending */
793 /* if DD is not set pending work has not been completed */
794 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
797 /* count the packet as being completed */
798 tx_ring->tx_stats.completed++;
800 /* clear next_to_watch to prevent false hangs */
801 tx_buffer->next_to_watch = NULL;
803 /* prevent any other reads prior to eop_desc being verified */
807 ixgbe_unmap_tx_resource(tx_ring, tx_buffer);
808 tx_desc->wb.status = 0;
809 if (likely(tx_desc == eop_desc)) {
811 dev_kfree_skb_any(tx_buffer->skb);
812 tx_buffer->skb = NULL;
814 total_bytes += tx_buffer->bytecount;
815 total_packets += tx_buffer->gso_segs;
821 if (unlikely(i == tx_ring->count)) {
824 tx_buffer = tx_ring->tx_buffer_info;
825 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, 0);
831 tx_ring->next_to_clean = i;
832 u64_stats_update_begin(&tx_ring->syncp);
833 tx_ring->stats.bytes += total_bytes;
834 tx_ring->stats.packets += total_packets;
835 u64_stats_update_end(&tx_ring->syncp);
836 q_vector->tx.total_bytes += total_bytes;
837 q_vector->tx.total_packets += total_packets;
839 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
840 /* schedule immediate reset if we believe we hung */
841 struct ixgbe_hw *hw = &adapter->hw;
842 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
843 e_err(drv, "Detected Tx Unit Hang\n"
845 " TDH, TDT <%x>, <%x>\n"
846 " next_to_use <%x>\n"
847 " next_to_clean <%x>\n"
848 "tx_buffer_info[next_to_clean]\n"
849 " time_stamp <%lx>\n"
851 tx_ring->queue_index,
852 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
853 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
854 tx_ring->next_to_use, i,
855 tx_ring->tx_buffer_info[i].time_stamp, jiffies);
857 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
860 "tx hang %d detected on queue %d, resetting adapter\n",
861 adapter->tx_timeout_count + 1, tx_ring->queue_index);
863 /* schedule immediate reset if we believe we hung */
864 ixgbe_tx_timeout_reset(adapter);
866 /* the adapter is about to reset, no point in enabling stuff */
870 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
871 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
872 (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
873 /* Make sure that anybody stopping the queue after this
874 * sees the new next_to_clean.
877 if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
878 !test_bit(__IXGBE_DOWN, &adapter->state)) {
879 netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
880 ++tx_ring->tx_stats.restart_queue;
887 #ifdef CONFIG_IXGBE_DCA
888 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
889 struct ixgbe_ring *rx_ring,
892 struct ixgbe_hw *hw = &adapter->hw;
894 u8 reg_idx = rx_ring->reg_idx;
896 rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
897 switch (hw->mac.type) {
898 case ixgbe_mac_82598EB:
899 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
900 rxctrl |= dca3_get_tag(rx_ring->dev, cpu);
902 case ixgbe_mac_82599EB:
904 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
905 rxctrl |= (dca3_get_tag(rx_ring->dev, cpu) <<
906 IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
911 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
912 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
913 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
914 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
917 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
918 struct ixgbe_ring *tx_ring,
921 struct ixgbe_hw *hw = &adapter->hw;
923 u8 reg_idx = tx_ring->reg_idx;
925 switch (hw->mac.type) {
926 case ixgbe_mac_82598EB:
927 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
928 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
929 txctrl |= dca3_get_tag(tx_ring->dev, cpu);
930 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
931 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
933 case ixgbe_mac_82599EB:
935 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
936 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
937 txctrl |= (dca3_get_tag(tx_ring->dev, cpu) <<
938 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
939 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
940 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
947 static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
949 struct ixgbe_adapter *adapter = q_vector->adapter;
950 struct ixgbe_ring *ring;
953 if (q_vector->cpu == cpu)
956 for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
957 ixgbe_update_tx_dca(adapter, ring, cpu);
959 for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
960 ixgbe_update_rx_dca(adapter, ring, cpu);
967 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
972 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
975 /* always use CB2 mode, difference is masked in the CB driver */
976 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
978 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
979 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
983 for (i = 0; i < num_q_vectors; i++) {
984 adapter->q_vector[i]->cpu = -1;
985 ixgbe_update_dca(adapter->q_vector[i]);
989 static int __ixgbe_notify_dca(struct device *dev, void *data)
991 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
992 unsigned long event = *(unsigned long *)data;
994 if (!(adapter->flags & IXGBE_FLAG_DCA_CAPABLE))
998 case DCA_PROVIDER_ADD:
999 /* if we're already enabled, don't do it again */
1000 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1002 if (dca_add_requester(dev) == 0) {
1003 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
1004 ixgbe_setup_dca(adapter);
1007 /* Fall Through since DCA is disabled. */
1008 case DCA_PROVIDER_REMOVE:
1009 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
1010 dca_remove_requester(dev);
1011 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
1012 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
1019 #endif /* CONFIG_IXGBE_DCA */
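/* Copy the RSS hash reported in the descriptor write-back into
 * skb->rxhash so the stack can reuse it.
 */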
1021 static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc,
1022 struct sk_buff *skb)
1024 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
1028 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
1029 * @adapter: address of board private structure
1030 * @rx_desc: advanced rx descriptor
1032 * Returns : true if it is FCoE pkt
1034 static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
1035 union ixgbe_adv_rx_desc *rx_desc)
1037 __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1039 return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
1040 ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
1041 (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
1042 IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
1046 * ixgbe_receive_skb - Send a completed packet up the stack
1047 * @adapter: board private structure
1048 * @skb: packet to send up
1049 * @status: hardware indication of status of receive
1050 * @rx_ring: rx descriptor ring (for a specific queue) to setup
1051 * @rx_desc: rx descriptor
1053 static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
1054 struct sk_buff *skb, u8 status,
1055 struct ixgbe_ring *ring,
1056 union ixgbe_adv_rx_desc *rx_desc)
1058 struct ixgbe_adapter *adapter = q_vector->adapter;
1059 struct napi_struct *napi = &q_vector->napi;
1060 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
1061 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
1063 if (is_vlan && (tag & VLAN_VID_MASK))
1064 __vlan_hwaccel_put_tag(skb, tag);
1066 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
1067 napi_gro_receive(napi, skb);
1073 * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum
1074 * @adapter: address of board private structure
1075 * @status_err: hardware indication of status of receive
1076 * @skb: skb currently being received and modified
1077 * @status_err: status error value of last descriptor in packet
1079 static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
1080 union ixgbe_adv_rx_desc *rx_desc,
1081 struct sk_buff *skb,
1084 skb->ip_summed = CHECKSUM_NONE;
1086 /* Rx csum disabled */
1087 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
1090 /* if IP and error */
1091 if ((status_err & IXGBE_RXD_STAT_IPCS) &&
1092 (status_err & IXGBE_RXDADV_ERR_IPE)) {
1093 adapter->hw_csum_rx_error++;
1097 if (!(status_err & IXGBE_RXD_STAT_L4CS))
1100 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
1101 u16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
1104 * 82599 errata, UDP frames with a 0 checksum can be marked as
1107 if ((pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
1108 (adapter->hw.mac.type == ixgbe_mac_82599EB))
1111 adapter->hw_csum_rx_error++;
1115 /* It must be a TCP or UDP packet with a valid checksum */
1116 skb->ip_summed = CHECKSUM_UNNECESSARY;
1119 static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
1122 * Force memory writes to complete before letting h/w
1123 * know there are new descriptors to fetch. (Only
1124 * applicable for weak-ordered memory model archs,
1128 writel(val, rx_ring->tail);
1132 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
1133 * @rx_ring: ring to place buffers on
1134 * @cleaned_count: number of buffers to replace
1136 void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1138 union ixgbe_adv_rx_desc *rx_desc;
1139 struct ixgbe_rx_buffer *bi;
1140 struct sk_buff *skb;
1141 u16 i = rx_ring->next_to_use;
1143 /* do nothing if no valid netdev defined */
1144 if (!rx_ring->netdev)
1147 while (cleaned_count--) {
1148 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
1149 bi = &rx_ring->rx_buffer_info[i];
1153 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1154 rx_ring->rx_buf_len);
1156 rx_ring->rx_stats.alloc_rx_buff_failed++;
1159 /* initialize queue mapping */
1160 skb_record_rx_queue(skb, rx_ring->queue_index);
1165 bi->dma = dma_map_single(rx_ring->dev,
1167 rx_ring->rx_buf_len,
1169 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
1170 rx_ring->rx_stats.alloc_rx_buff_failed++;
1176 if (ring_is_ps_enabled(rx_ring)) {
1178 bi->page = netdev_alloc_page(rx_ring->netdev);
1180 rx_ring->rx_stats.alloc_rx_page_failed++;
1185 if (!bi->page_dma) {
1186 /* use a half page if we're re-using */
1187 bi->page_offset ^= PAGE_SIZE / 2;
1188 bi->page_dma = dma_map_page(rx_ring->dev,
1193 if (dma_mapping_error(rx_ring->dev,
1195 rx_ring->rx_stats.alloc_rx_page_failed++;
1201 /* Refresh the desc even if buffer_addrs didn't change
1202 * because each write-back erases this info. */
1203 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1204 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1206 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
1207 rx_desc->read.hdr_addr = 0;
1211 if (i == rx_ring->count)
1216 if (rx_ring->next_to_use != i) {
1217 rx_ring->next_to_use = i;
1218 ixgbe_release_rx_desc(rx_ring, i);
1222 static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
1224 /* HW will not DMA in data larger than the given buffer, even if it
1225 * parses the (NFS, of course) header to be larger. In that case, it
1226 * fills the header buffer and spills the rest into the page.
1228 u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
1229 u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
1230 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
1231 if (hlen > IXGBE_RX_HDR_SIZE)
1232 hlen = IXGBE_RX_HDR_SIZE;
1237 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
1238 * @skb: pointer to the last skb in the rsc queue
1240 * This function changes a queue full of hw rsc buffers into a completed
1241 * packet. It uses the ->prev pointers to find the first packet and then
1242 * turns it into the frag list owner.
1244 static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
1246 unsigned int frag_list_size = 0;
1247 unsigned int skb_cnt = 1;
1250 struct sk_buff *prev = skb->prev;
1251 frag_list_size += skb->len;
1257 skb_shinfo(skb)->frag_list = skb->next;
1259 skb->len += frag_list_size;
1260 skb->data_len += frag_list_size;
1261 skb->truesize += frag_list_size;
1262 IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
1267 static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
1269 return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
1270 IXGBE_RXDADV_RSCCNT_MASK);
1273 static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1274 struct ixgbe_ring *rx_ring,
1277 struct ixgbe_adapter *adapter = q_vector->adapter;
1278 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
1279 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
1280 struct sk_buff *skb;
1281 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1282 const int current_node = numa_node_id();
1285 #endif /* IXGBE_FCOE */
1288 u16 cleaned_count = 0;
1289 bool pkt_is_rsc = false;
1291 i = rx_ring->next_to_clean;
1292 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
1293 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1295 while (staterr & IXGBE_RXD_STAT_DD) {
1298 rmb(); /* read descriptor and rx_buffer_info after status DD */
1300 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1302 skb = rx_buffer_info->skb;
1303 rx_buffer_info->skb = NULL;
1304 prefetch(skb->data);
1306 if (ring_is_rsc_enabled(rx_ring))
1307 pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
1309 /* if this is a skb from previous receive DMA will be 0 */
1310 if (rx_buffer_info->dma) {
1313 !(staterr & IXGBE_RXD_STAT_EOP) &&
1316 * When HWRSC is enabled, delay unmapping
1317 * of the first packet. It carries the
1318 * header information, HW may still
1319 * access the header after the writeback.
1320 * Only unmap it when EOP is reached
1322 IXGBE_RSC_CB(skb)->delay_unmap = true;
1323 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
1325 dma_unmap_single(rx_ring->dev,
1326 rx_buffer_info->dma,
1327 rx_ring->rx_buf_len,
1330 rx_buffer_info->dma = 0;
1332 if (ring_is_ps_enabled(rx_ring)) {
1333 hlen = ixgbe_get_hlen(rx_desc);
1334 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1336 hlen = le16_to_cpu(rx_desc->wb.upper.length);
1341 /* assume packet split since header is unmapped */
1342 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1346 dma_unmap_page(rx_ring->dev,
1347 rx_buffer_info->page_dma,
1350 rx_buffer_info->page_dma = 0;
1351 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1352 rx_buffer_info->page,
1353 rx_buffer_info->page_offset,
1356 if ((page_count(rx_buffer_info->page) == 1) &&
1357 (page_to_nid(rx_buffer_info->page) == current_node))
1358 get_page(rx_buffer_info->page);
1360 rx_buffer_info->page = NULL;
1362 skb->len += upper_len;
1363 skb->data_len += upper_len;
1364 skb->truesize += upper_len;
1368 if (i == rx_ring->count)
1371 next_rxd = IXGBE_RX_DESC_ADV(rx_ring, i);
1376 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1377 IXGBE_RXDADV_NEXTP_SHIFT;
1378 next_buffer = &rx_ring->rx_buffer_info[nextp];
1380 next_buffer = &rx_ring->rx_buffer_info[i];
1383 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
1384 if (ring_is_ps_enabled(rx_ring)) {
1385 rx_buffer_info->skb = next_buffer->skb;
1386 rx_buffer_info->dma = next_buffer->dma;
1387 next_buffer->skb = skb;
1388 next_buffer->dma = 0;
1390 skb->next = next_buffer->skb;
1391 skb->next->prev = skb;
1393 rx_ring->rx_stats.non_eop_descs++;
1398 skb = ixgbe_transform_rsc_queue(skb);
1399 /* if we got here without RSC the packet is invalid */
1401 __pskb_trim(skb, 0);
1402 rx_buffer_info->skb = skb;
1407 if (ring_is_rsc_enabled(rx_ring)) {
1408 if (IXGBE_RSC_CB(skb)->delay_unmap) {
1409 dma_unmap_single(rx_ring->dev,
1410 IXGBE_RSC_CB(skb)->dma,
1411 rx_ring->rx_buf_len,
1413 IXGBE_RSC_CB(skb)->dma = 0;
1414 IXGBE_RSC_CB(skb)->delay_unmap = false;
1418 if (ring_is_ps_enabled(rx_ring))
1419 rx_ring->rx_stats.rsc_count +=
1420 skb_shinfo(skb)->nr_frags;
1422 rx_ring->rx_stats.rsc_count +=
1423 IXGBE_RSC_CB(skb)->skb_cnt;
1424 rx_ring->rx_stats.rsc_flush++;
1427 /* ERR_MASK will only have valid bits if EOP set */
1428 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
1429 dev_kfree_skb_any(skb);
1433 ixgbe_rx_checksum(adapter, rx_desc, skb, staterr);
1434 if (adapter->netdev->features & NETIF_F_RXHASH)
1435 ixgbe_rx_hash(rx_desc, skb);
1437 /* probably a little skewed due to removing CRC */
1438 total_rx_bytes += skb->len;
1441 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1443 /* if ddp, not passing to ULD unless for FCP_RSP or error */
1444 if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
1445 ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb,
1448 dev_kfree_skb_any(skb);
1452 #endif /* IXGBE_FCOE */
1453 ixgbe_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
1457 rx_desc->wb.upper.status_error = 0;
1462 /* return some buffers to hardware, one at a time is too slow */
1463 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
1464 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1468 /* use prefetched values */
1470 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1473 rx_ring->next_to_clean = i;
1474 cleaned_count = ixgbe_desc_unused(rx_ring);
1477 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1480 /* include DDPed FCoE data */
1481 if (ddp_bytes > 0) {
1484 mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
1485 sizeof(struct fc_frame_header) -
1486 sizeof(struct fcoe_crc_eof);
1489 total_rx_bytes += ddp_bytes;
1490 total_rx_packets += DIV_ROUND_UP(ddp_bytes, mss);
1492 #endif /* IXGBE_FCOE */
1494 u64_stats_update_begin(&rx_ring->syncp);
1495 rx_ring->stats.packets += total_rx_packets;
1496 rx_ring->stats.bytes += total_rx_bytes;
1497 u64_stats_update_end(&rx_ring->syncp);
1498 q_vector->rx.total_packets += total_rx_packets;
1499 q_vector->rx.total_bytes += total_rx_bytes;
1505 * ixgbe_configure_msix - Configure MSI-X hardware
1506 * @adapter: board private structure
1508 * ixgbe_configure_msix sets up the hardware to properly generate MSI-X
1511 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1513 struct ixgbe_q_vector *q_vector;
1514 int q_vectors, v_idx;
1517 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1519 /* Populate MSIX to EITR Select */
1520 if (adapter->num_vfs > 32) {
1521 u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
1522 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
1526 * Populate the IVAR table and set the ITR values to the
1527 * corresponding register.
1529 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
1530 struct ixgbe_ring *ring;
1531 q_vector = adapter->q_vector[v_idx];
1533 for (ring = q_vector->rx.ring; ring != NULL; ring = ring->next)
1534 ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx);
1536 for (ring = q_vector->tx.ring; ring != NULL; ring = ring->next)
1537 ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
1539 if (q_vector->tx.ring && !q_vector->rx.ring)
1541 q_vector->eitr = adapter->tx_eitr_param;
1542 else if (q_vector->rx.ring)
1544 q_vector->eitr = adapter->rx_eitr_param;
1546 ixgbe_write_eitr(q_vector);
1549 switch (adapter->hw.mac.type) {
1550 case ixgbe_mac_82598EB:
1551 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
1554 case ixgbe_mac_82599EB:
1555 case ixgbe_mac_X540:
1556 ixgbe_set_ivar(adapter, -1, 1, v_idx);
1562 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
1564 /* set up to autoclear timer, and the vectors */
1565 mask = IXGBE_EIMS_ENABLE_MASK;
1566 if (adapter->num_vfs)
1567 mask &= ~(IXGBE_EIMS_OTHER |
1568 IXGBE_EIMS_MAILBOX |
1571 mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
1572 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
1575 enum latency_range {
1579 latency_invalid = 255
1583 * ixgbe_update_itr - update the dynamic ITR value based on statistics
1584 * @q_vector: structure containing interrupt and ring information
1585 * @ring_container: structure containing ring performance data
1587 * Stores a new ITR value based on packets and byte
1588 * counts during the last interrupt. The advantage of per interrupt
1589 * computation is faster updates and more accurate ITR for the current
1590 * traffic pattern. Constants in this function were computed
1591 * based on theoretical maximum wire speed and thresholds were set based
1592 * on testing data as well as attempting to minimize response time
1593 * while increasing bulk throughput.
 * This functionality is controlled by the InterruptThrottleRate module
 * parameter (see ixgbe_param.c).
1597 static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
1598 struct ixgbe_ring_container *ring_container)
1601 struct ixgbe_adapter *adapter = q_vector->adapter;
1602 int bytes = ring_container->total_bytes;
1603 int packets = ring_container->total_packets;
1605 u8 itr_setting = ring_container->itr;
/* simple throttle rate management
 *    0-20MB/s lowest (100000 ints/s)
 *   20-100MB/s low   (20000 ints/s)
 *  100-1249MB/s bulk (8000 ints/s)
 */
1615 /* what was last interrupt timeslice? */
1616 timepassed_us = 1000000/q_vector->eitr;
1617 bytes_perint = bytes / timepassed_us; /* bytes/usec */
1619 switch (itr_setting) {
1620 case lowest_latency:
1621 if (bytes_perint > adapter->eitr_low)
1622 itr_setting = low_latency;
1625 if (bytes_perint > adapter->eitr_high)
1626 itr_setting = bulk_latency;
1627 else if (bytes_perint <= adapter->eitr_low)
1628 itr_setting = lowest_latency;
1631 if (bytes_perint <= adapter->eitr_high)
1632 itr_setting = low_latency;
1636 /* clear work counters since we have the values we need */
1637 ring_container->total_bytes = 0;
1638 ring_container->total_packets = 0;
1640 /* write updated itr to ring container */
1641 ring_container->itr = itr_setting;
1645 * ixgbe_write_eitr - write EITR register in hardware specific way
1646 * @q_vector: structure containing interrupt and ring information
1648 * This function is made to be called by ethtool and by the driver
1649 * when it needs to update EITR registers at runtime. Hardware
1650 * specific quirks/differences are taken care of here.
1652 void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1654 struct ixgbe_adapter *adapter = q_vector->adapter;
1655 struct ixgbe_hw *hw = &adapter->hw;
1656 int v_idx = q_vector->v_idx;
1657 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
1659 switch (adapter->hw.mac.type) {
1660 case ixgbe_mac_82598EB:
1661 /* must write high and low 16 bits to reset counter */
1662 itr_reg |= (itr_reg << 16);
1664 case ixgbe_mac_82599EB:
1665 case ixgbe_mac_X540:
 * 82599 and X540 can support a value of zero, so allow it for
 * max interrupt rate, but there is an erratum where it cannot
 * be zero with RSC
1672 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
1676 * set the WDIS bit to not clear the timer bits and cause an
1677 * immediate assertion of the interrupt
1679 itr_reg |= IXGBE_EITR_CNT_WDIS;
1684 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
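/* Recompute the ITR for this q_vector from its Rx and Tx statistics,
 * apply exponential smoothing and write the result to EITR.
 */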
1687 static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
1689 u32 new_itr = q_vector->eitr;
1692 ixgbe_update_itr(q_vector, &q_vector->tx);
1693 ixgbe_update_itr(q_vector, &q_vector->rx);
1695 current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
1697 switch (current_itr) {
1698 /* counts and packets in update_itr are dependent on these numbers */
1699 case lowest_latency:
1703 new_itr = 20000; /* aka hwitr = ~200 */
1712 if (new_itr != q_vector->eitr) {
1713 /* do an exponential smoothing */
1714 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
1716 /* save the algorithm value here */
1717 q_vector->eitr = new_itr;
1719 ixgbe_write_eitr(q_vector);
 * ixgbe_check_overtemp_subtask - check for over temperature
1725 * @adapter: pointer to adapter
1727 static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
1729 struct ixgbe_hw *hw = &adapter->hw;
1730 u32 eicr = adapter->interrupt_event;
1732 if (test_bit(__IXGBE_DOWN, &adapter->state))
1735 if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1736 !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
1739 adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
1741 switch (hw->device_id) {
1742 case IXGBE_DEV_ID_82599_T3_LOM:
 * Since the warning interrupt is shared by both ports,
 * we don't have to check whether this interrupt was for
 * our port. We also may have missed the interrupt, so
 * always check whether we got an LSC as well.
1750 if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
1751 !(eicr & IXGBE_EICR_LSC))
1754 if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
1756 bool link_up = false;
1758 hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
1764 /* Check if this is not due to overtemp */
1765 if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
1770 if (!(eicr & IXGBE_EICR_GPI_SDP0))
1775 "Network adapter has been stopped because it has over heated. "
1776 "Restart the computer. If the problem persists, "
1777 "power off the system and replace the adapter\n");
1779 adapter->interrupt_event = 0;
1782 static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
1784 struct ixgbe_hw *hw = &adapter->hw;
1786 if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) &&
1787 (eicr & IXGBE_EICR_GPI_SDP1)) {
1788 e_crit(probe, "Fan has stopped, replace the adapter\n");
1789 /* write to clear the interrupt */
1790 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1794 static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
1796 struct ixgbe_hw *hw = &adapter->hw;
1798 if (eicr & IXGBE_EICR_GPI_SDP2) {
1799 /* Clear the interrupt */
1800 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1801 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1802 adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
1803 ixgbe_service_event_schedule(adapter);
1807 if (eicr & IXGBE_EICR_GPI_SDP1) {
1808 /* Clear the interrupt */
1809 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1810 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1811 adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
1812 ixgbe_service_event_schedule(adapter);
1817 static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
1819 struct ixgbe_hw *hw = &adapter->hw;
1822 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1823 adapter->link_check_timeout = jiffies;
1824 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1825 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1826 IXGBE_WRITE_FLUSH(hw);
1827 ixgbe_service_event_schedule(adapter);
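/* Unmask (enable) the interrupts for the queues selected by qmask. */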
1831 static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1835 struct ixgbe_hw *hw = &adapter->hw;
1837 switch (hw->mac.type) {
1838 case ixgbe_mac_82598EB:
1839 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1840 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1842 case ixgbe_mac_82599EB:
1843 case ixgbe_mac_X540:
1844 mask = (qmask & 0xFFFFFFFF);
1846 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1847 mask = (qmask >> 32);
1849 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1854 /* skip the flush */
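/* Mask (disable) the interrupts for the queues selected by qmask. */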
1857 static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
1861 struct ixgbe_hw *hw = &adapter->hw;
1863 switch (hw->mac.type) {
1864 case ixgbe_mac_82598EB:
1865 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1866 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1868 case ixgbe_mac_82599EB:
1869 case ixgbe_mac_X540:
1870 mask = (qmask & 0xFFFFFFFF);
1872 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1873 mask = (qmask >> 32);
1875 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1880 /* skip the flush */
1884 * ixgbe_irq_enable - Enable default interrupt generation settings
1885 * @adapter: board private structure
1887 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
1890 u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1892 /* don't reenable LSC while waiting for link */
1893 if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
1894 mask &= ~IXGBE_EIMS_LSC;
1896 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
1897 mask |= IXGBE_EIMS_GPI_SDP0;
1898 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
1899 mask |= IXGBE_EIMS_GPI_SDP1;
1900 switch (adapter->hw.mac.type) {
1901 case ixgbe_mac_82599EB:
1902 case ixgbe_mac_X540:
1903 mask |= IXGBE_EIMS_ECC;
1904 mask |= IXGBE_EIMS_GPI_SDP1;
1905 mask |= IXGBE_EIMS_GPI_SDP2;
1906 mask |= IXGBE_EIMS_MAILBOX;
1911 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
1912 !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
1913 mask |= IXGBE_EIMS_FLOW_DIR;
1915 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1917 ixgbe_irq_enable_queues(adapter, ~0);
1919 IXGBE_WRITE_FLUSH(&adapter->hw);
1922 static irqreturn_t ixgbe_msix_other(int irq, void *data)
1924 struct ixgbe_adapter *adapter = data;
1925 struct ixgbe_hw *hw = &adapter->hw;
* Workaround for silicon errata. Use clear-by-write instead
* of clear-by-read. Reading with EICS will return the
* interrupt causes without clearing, which is later done
* with the write to EICR.
1934 eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
1935 IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr);
1937 if (eicr & IXGBE_EICR_LSC)
1938 ixgbe_check_lsc(adapter);
1940 if (eicr & IXGBE_EICR_MAILBOX)
1941 ixgbe_msg_task(adapter);
1943 switch (hw->mac.type) {
1944 case ixgbe_mac_82599EB:
1945 case ixgbe_mac_X540:
1946 if (eicr & IXGBE_EICR_ECC)
1947 e_info(link, "Received unrecoverable ECC Err, please "
1949 /* Handle Flow Director Full threshold interrupt */
1950 if (eicr & IXGBE_EICR_FLOW_DIR) {
1951 int reinit_count = 0;
1953 for (i = 0; i < adapter->num_tx_queues; i++) {
1954 struct ixgbe_ring *ring = adapter->tx_ring[i];
1955 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
1960 /* no more flow director interrupts until after init */
1961 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
1962 adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
1963 ixgbe_service_event_schedule(adapter);
1966 ixgbe_check_sfp_event(adapter, eicr);
1967 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1968 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
1969 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1970 adapter->interrupt_event = eicr;
1971 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
1972 ixgbe_service_event_schedule(adapter);
1980 ixgbe_check_fan_failure(adapter, eicr);
1982 /* re-enable the original interrupt state, no lsc, no queues */
1983 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1984 ixgbe_irq_enable(adapter, false, false);
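/* MSI-X ring interrupt handler: hand the rings of this q_vector off to
 * NAPI for polling.
 */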
1989 static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
1991 struct ixgbe_q_vector *q_vector = data;
1993 /* EIAM disabled interrupts (on this vector) for us */
1995 if (q_vector->rx.ring || q_vector->tx.ring)
1996 napi_schedule(&q_vector->napi);
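/* Attach an Rx ring (and, in the helper below, a Tx ring) to a
 * q_vector by pushing it onto the vector's singly linked ring list.
 */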
2001 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
2004 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2005 struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
2007 rx_ring->q_vector = q_vector;
2008 rx_ring->next = q_vector->rx.ring;
2009 q_vector->rx.ring = rx_ring;
2010 q_vector->rx.count++;
2013 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2016 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2017 struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
2019 tx_ring->q_vector = q_vector;
2020 tx_ring->next = q_vector->tx.ring;
2021 q_vector->tx.ring = tx_ring;
2022 q_vector->tx.count++;
2023 q_vector->tx.work_limit = a->tx_work_limit;
2027 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
2028 * @adapter: board private structure to initialize
2030 * This function maps descriptor rings to the queue-specific vectors
2031 * we were allotted through the MSI-X enabling code. Ideally, we'd have
2032 * one vector per ring/queue, but on a constrained vector budget, we
2033 * group the rings as "efficiently" as possible. You would add new
2034 * mapping configurations in here.
2036 static void ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
2038 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2039 int rxr_remaining = adapter->num_rx_queues, rxr_idx = 0;
2040 int txr_remaining = adapter->num_tx_queues, txr_idx = 0;
2043 /* only one q_vector if MSI-X is disabled. */
2044 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2048 * If we don't have enough vectors for a 1-to-1 mapping, we'll have to
2049 * group them so there are multiple queues per vector.
2051 * Re-adjusting *qpv takes care of the remainder.
2053 for (; v_start < q_vectors && rxr_remaining; v_start++) {
2054 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_start);
2055 for (; rqpv; rqpv--, rxr_idx++, rxr_remaining--)
2056 map_vector_to_rxq(adapter, v_start, rxr_idx);
* If there are not enough q_vectors for each ring to have its own
* vector then we must pair up Rx/Tx on each vector
2063 if ((v_start + txr_remaining) > q_vectors)
2066 for (; v_start < q_vectors && txr_remaining; v_start++) {
2067 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_start);
2068 for (; tqpv; tqpv--, txr_idx++, txr_remaining--)
2069 map_vector_to_txq(adapter, v_start, txr_idx);
2074 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
2075 * @adapter: board private structure
2077 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
2078 * interrupts from the kernel.
2080 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2082 struct net_device *netdev = adapter->netdev;
2083 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2087 for (vector = 0; vector < q_vectors; vector++) {
2088 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2089 struct msix_entry *entry = &adapter->msix_entries[vector];
2091 if (q_vector->tx.ring && q_vector->rx.ring) {
2092 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2093 "%s-%s-%d", netdev->name, "TxRx", ri++);
2095 } else if (q_vector->rx.ring) {
2096 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2097 "%s-%s-%d", netdev->name, "rx", ri++);
2098 } else if (q_vector->tx.ring) {
2099 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2100 "%s-%s-%d", netdev->name, "tx", ti++);
2102 /* skip this unused q_vector */
2105 err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0,
2106 q_vector->name, q_vector);
2108 e_err(probe, "request_irq failed for MSIX interrupt "
2109 "Error: %d\n", err);
2110 goto free_queue_irqs;
2112 /* If Flow Director is enabled, set interrupt affinity */
2113 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
2114 /* assign the mask for this irq */
2115 irq_set_affinity_hint(entry->vector,
2116 q_vector->affinity_mask);
2120 err = request_irq(adapter->msix_entries[vector].vector,
2121 ixgbe_msix_other, 0, netdev->name, adapter);
2123 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
2124 goto free_queue_irqs;
2132 irq_set_affinity_hint(adapter->msix_entries[vector].vector,
2134 free_irq(adapter->msix_entries[vector].vector,
2135 adapter->q_vector[vector]);
2137 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2138 pci_disable_msix(adapter->pdev);
2139 kfree(adapter->msix_entries);
2140 adapter->msix_entries = NULL;
2145 * ixgbe_intr - legacy mode Interrupt Handler
2146 * @irq: interrupt number
2147 * @data: pointer to a network interface device structure
2149 static irqreturn_t ixgbe_intr(int irq, void *data)
2151 struct ixgbe_adapter *adapter = data;
2152 struct ixgbe_hw *hw = &adapter->hw;
2153 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2157 * Workaround for silicon errata on 82598. Mask the interrupts
2158 * before the read of EICR.
2160 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read,
 * therefore no explicit interrupt disable is necessary */
2164 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
2167 * shared interrupt alert!
2168 * make sure interrupts are enabled because the read will
2169 * have disabled interrupts due to EIAM
2170 * finish the workaround of silicon errata on 82598. Unmask
2171 * the interrupt that we masked before the EICR read.
2173 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2174 ixgbe_irq_enable(adapter, true, true);
2175 return IRQ_NONE; /* Not our interrupt */
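        /*
         * A non-zero EICR means at least one cause bit is ours, so the
         * handler returns IRQ_HANDLED below; the IRQ_NONE above lets the
         * kernel try any other handlers sharing this legacy line.
         */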
        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);

        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
                ixgbe_check_sfp_event(adapter, eicr);
                if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
                    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
                        if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
                                adapter->interrupt_event = eicr;
                                adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
                                ixgbe_service_event_schedule(adapter);
                        }
                }
                break;
        default:
                break;
        }

        ixgbe_check_fan_failure(adapter, eicr);

        if (napi_schedule_prep(&(q_vector->napi))) {
                /* would disable interrupts here but EIAM disabled it */
                __napi_schedule(&(q_vector->napi));
        }

        /*
         * re-enable link(maybe) and non-queue interrupts, no flush.
         * ixgbe_poll will re-enable the queue interrupts
         */
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                ixgbe_irq_enable(adapter, false, false);

        return IRQ_HANDLED;
}

static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
{
        int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        int i;

        /* legacy and MSI only use one vector */
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                q_vectors = 1;

        for (i = 0; i < adapter->num_rx_queues; i++) {
                adapter->rx_ring[i]->q_vector = NULL;
                adapter->rx_ring[i]->next = NULL;
        }
        for (i = 0; i < adapter->num_tx_queues; i++) {
                adapter->tx_ring[i]->q_vector = NULL;
                adapter->tx_ring[i]->next = NULL;
        }

        for (i = 0; i < q_vectors; i++) {
                struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
                memset(&q_vector->rx, 0, sizeof(struct ixgbe_ring_container));
                memset(&q_vector->tx, 0, sizeof(struct ixgbe_ring_container));
        }
}

/**
 * ixgbe_request_irq - initialize interrupts
 * @adapter: board private structure
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        /* map all of the rings to the q_vectors */
        ixgbe_map_rings_to_vectors(adapter);

        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
                err = ixgbe_request_msix_irqs(adapter);
        else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED)
                err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
                                  netdev->name, adapter);
        else
                err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
                                  netdev->name, adapter);

        if (err) {
                e_err(probe, "request_irq failed, Error %d\n", err);

                /* place q_vectors and rings back into a known good state */
                ixgbe_reset_q_vectors(adapter);
        }

        return err;
}

static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                int i, q_vectors;

                q_vectors = adapter->num_msix_vectors;
                i = q_vectors - 1;
                free_irq(adapter->msix_entries[i].vector, adapter);
                i--;

                for (; i >= 0; i--) {
                        /* free only the irqs that were actually requested */
                        if (!adapter->q_vector[i]->rx.ring &&
                            !adapter->q_vector[i]->tx.ring)
                                continue;

                        /* clear the affinity_mask in the IRQ descriptor */
                        irq_set_affinity_hint(adapter->msix_entries[i].vector,
                                              NULL);
                        free_irq(adapter->msix_entries[i].vector,
                                 adapter->q_vector[i]);
                }
        } else {
                free_irq(adapter->pdev->irq, adapter);
        }

        /* clear q_vector state information */
        ixgbe_reset_q_vectors(adapter);
}

/**
 * ixgbe_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
{
        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82598EB:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
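                /*
                 * On 82599/X540 the per-queue interrupt causes are masked
                 * through the two EIMC_EX registers written below, so only
                 * the upper (non-queue) half of EIMC is written here.
                 */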
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
                break;
        default:
                break;
        }
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
                int i;
                for (i = 0; i < adapter->num_msix_vectors; i++)
                        synchronize_irq(adapter->msix_entries[i].vector);
        } else {
                synchronize_irq(adapter->pdev->irq);
        }
}
/**
 * ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
 * @adapter: board private structure
 **/
static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;

        IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
                        EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr_param));

        ixgbe_set_ivar(adapter, 0, 0, 0);
        ixgbe_set_ivar(adapter, 1, 0, 0);
        e_info(hw, "Legacy interrupt IVAR setup done\n");
}

/**
 * ixgbe_configure_tx_ring - Configure 8259x Tx ring after Reset
 * @adapter: board private structure
 * @ring: structure containing ring specific data
 *
 * Configure the Tx descriptor ring after a reset.
 **/
void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u64 tdba = ring->dma;
        int wait_loop = 10;
        u32 txdctl = IXGBE_TXDCTL_ENABLE;
        u8 reg_idx = ring->reg_idx;

        /* disable queue to avoid issues while updating state */
        IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
        IXGBE_WRITE_FLUSH(hw);

        IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx),
                        (tdba & DMA_BIT_MASK(32)));
        IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx),
                        ring->count * sizeof(union ixgbe_adv_tx_desc));
        IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
        ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);

        /*
         * set WTHRESH to encourage burst writeback, it should not be set
         * higher than 1 when ITR is 0 as it could cause false TX hangs
         *
         * In order to avoid issues WTHRESH + PTHRESH should always be equal
         * to or less than the number of on chip descriptors, which is
         * currently 40.
         */
        if (!adapter->tx_itr_setting || !adapter->rx_itr_setting)
                txdctl |= (1 << 16);    /* WTHRESH = 1 */
        else
                txdctl |= (8 << 16);    /* WTHRESH = 8 */

        /* PTHRESH=32 is needed to avoid a Tx hang with DFP enabled. */
        txdctl |= (1 << 8) |    /* HTHRESH = 1 */
                  32;           /* PTHRESH = 32 */
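        /*
         * TXDCTL packs the thresholds as PTHRESH in bits 6:0, HTHRESH in
         * bits 14:8 and WTHRESH in bits 22:16, which is why HTHRESH is
         * shifted by 8 and WTHRESH by 16 above.
         */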

        /* reinitialize flowdirector state */
        if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
            adapter->atr_sample_rate) {
                ring->atr_sample_rate = adapter->atr_sample_rate;
                ring->atr_count = 0;
                set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
        } else {
                ring->atr_sample_rate = 0;
        }

        clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);

        /* enable queue */
        IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl);

        /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */
        if (hw->mac.type == ixgbe_mac_82598EB &&
            !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
                return;

        /* poll to verify queue is enabled */
        do {
                usleep_range(1000, 2000);
                txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
        } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
        if (!wait_loop)
                e_err(drv, "Could not enable Tx Queue %d\n", reg_idx);
}

static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rttdcs;
        u32 reg;
        u8 tcs = netdev_get_num_tc(adapter->netdev);

        if (hw->mac.type == ixgbe_mac_82598EB)
                return;

        /* disable the arbiter while setting MTQC */
        rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        rttdcs |= IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);

        /* set transmit pool layout */
        switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
        case (IXGBE_FLAG_SRIOV_ENABLED):
                IXGBE_WRITE_REG(hw, IXGBE_MTQC,
                                (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
                break;
        default:
                if (!tcs)
                        reg = IXGBE_MTQC_64Q_1PB;
                else if (tcs <= 4)
                        reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
                else
                        reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;

                IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

                /* Enable Security TX Buffer IFG for multiple pb */
                if (tcs) {
                        reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
                        reg |= IXGBE_SECTX_DCB;
                        IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
                }
                break;
        }

        /* re-enable the arbiter */
        rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
}

/**
 * ixgbe_configure_tx - Configure 8259x Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 dmatxctl;
        u32 i;

        ixgbe_setup_mtqc(adapter);

        if (hw->mac.type != ixgbe_mac_82598EB) {
                /* DMATXCTL.EN must be before Tx queues are enabled */
                dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
                dmatxctl |= IXGBE_DMATXCTL_TE;
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
        }

        /* Setup the HW Tx Head and Tail descriptor pointers */
        for (i = 0; i < adapter->num_tx_queues; i++)
                ixgbe_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2

static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring)
{
        u32 srrctl;
        u8 reg_idx = rx_ring->reg_idx;

        switch (adapter->hw.mac.type) {
        case ixgbe_mac_82598EB: {
                struct ixgbe_ring_feature *feature = adapter->ring_feature;
                const int mask = feature[RING_F_RSS].mask;
                reg_idx = reg_idx & mask;
        }
                break;
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
        default:
                break;
        }

        srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));

        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
        if (adapter->num_vfs)
                srrctl |= IXGBE_SRRCTL_DROP_EN;

        srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                  IXGBE_SRRCTL_BSIZEHDR_MASK;
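        /*
         * Note: DROP_EN lets a full ring drop packets instead of backing up
         * the shared Rx packet buffer, which matters once VFs share the
         * port. The SRRCTL size fields are coarse-grained (BSIZEPACKET in
         * 1 KB units, BSIZEHEADER in 64 byte units), hence the shifts used
         * above and below.
         */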
        if (ring_is_ps_enabled(rx_ring)) {
#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
                srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
                srrctl |= (PAGE_SIZE / 2) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#endif
                srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
        } else {
                srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >>
                          IXGBE_SRRCTL_BSIZEPKT_SHIFT;
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
        }

        IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
}

static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D,
                          0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE,
                          0x6A3E67EA, 0x14364D17, 0x3BED200D};
        u32 mrqc = 0, reta = 0;
        u32 rxcsum;
        int i, j;
        u8 tcs = netdev_get_num_tc(adapter->netdev);
        int maxq = adapter->ring_feature[RING_F_RSS].indices;

        if (tcs)
                maxq = min(maxq, adapter->num_tx_queues / tcs);

        /* Fill out hash function seeds */
        for (i = 0; i < 10; i++)
                IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]);

        /* Fill out redirection table */
        for (i = 0, j = 0; i < 128; i++, j++) {
                if (j == maxq)
                        j = 0;
                /* reta = 4-byte sliding window of
                 * 0x00..(indices-1)(indices-1)00..etc. */
                reta = (reta << 8) | (j * 0x11);
                if ((i & 3) == 3)
                        IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
        }
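        /*
         * Example: with maxq = 4 the bytes shifted in above are 0x00, 0x11,
         * 0x22, 0x33, 0x00, ... and every fourth iteration the accumulated
         * 32-bit window is flushed to RETA(i >> 2), so the 128 table entries
         * spread the hash results round-robin across the first maxq queues.
         */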

        /* Disable indicating checksum in descriptor, enables RSS hash */
        rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
        rxcsum |= IXGBE_RXCSUM_PCSD;
        IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);

        if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
            (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
                mrqc = IXGBE_MRQC_RSSEN;
        } else {
                int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
                                             | IXGBE_FLAG_SRIOV_ENABLED);

                switch (mask) {
                case (IXGBE_FLAG_RSS_ENABLED):
                        if (!tcs)
                                mrqc = IXGBE_MRQC_RSSEN;
                        else if (tcs <= 4)
                                mrqc = IXGBE_MRQC_RTRSS4TCEN;
                        else
                                mrqc = IXGBE_MRQC_RTRSS8TCEN;
                        break;
                case (IXGBE_FLAG_SRIOV_ENABLED):
                        mrqc = IXGBE_MRQC_VMDQEN;
                        break;
                default:
                        break;
                }
        }

        /* Perform hash on these packet types */
        mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
              | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
              | IXGBE_MRQC_RSS_FIELD_IPV6
              | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}

/**
 * ixgbe_configure_rscctl - enable RSC for the indicated ring
 * @adapter: address of board private structure
 * @ring: ring to enable RSC on
 **/
static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rscctrl;
        int rx_buf_len;
        u8 reg_idx = ring->reg_idx;

        if (!ring_is_rsc_enabled(ring))
                return;

        rx_buf_len = ring->rx_buf_len;
        rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
        rscctrl |= IXGBE_RSCCTL_RSCEN;
        /*
         * we must limit the number of descriptors so that the
         * total size of max desc * buf_len is not greater
         * than 65536
         */
        if (ring_is_ps_enabled(ring)) {
#if (MAX_SKB_FRAGS > 16)
                rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
                rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
#elif (MAX_SKB_FRAGS > 4)
                rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
#else
                rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
#endif
        } else {
                if (rx_buf_len < IXGBE_RXBUFFER_4096)
                        rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
                else if (rx_buf_len < IXGBE_RXBUFFER_8192)
                        rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
                else
                        rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
        }
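        /*
         * Example of the cap being enforced: a 2K buffer with MAXDESC_16
         * allows at most 16 * 2048 = 32768 bytes per aggregated receive,
         * well under the limit above, while an 8K buffer is held to 4
         * descriptors for the same reason.
         */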
        IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
}

/**
 * ixgbe_set_uta - Set unicast filter table address
 * @adapter: board private structure
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int i;

        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
                return;

        /* we only need to do this if VMDq is enabled */
        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
                return;

        for (i = 0; i < 128; i++)
                IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
}

#define IXGBE_MAX_RX_DESC_POLL 10
static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int wait_loop = IXGBE_MAX_RX_DESC_POLL;
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;

        /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
        if (hw->mac.type == ixgbe_mac_82598EB &&
            !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
                return;

        do {
                usleep_range(1000, 2000);
                rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
        } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));

        if (!wait_loop) {
                e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within "
                      "the polling period\n", reg_idx);
        }
}

void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
                            struct ixgbe_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int wait_loop = IXGBE_MAX_RX_DESC_POLL;
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;

        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
        rxdctl &= ~IXGBE_RXDCTL_ENABLE;

        /* write value back with RXDCTL.ENABLE bit cleared */
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

        if (hw->mac.type == ixgbe_mac_82598EB &&
            !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
                return;

        /* the hardware may take up to 100us to really disable the rx queue */
        do {
                udelay(10);
                rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
        } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

        if (!wait_loop) {
                e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
                      "the polling period\n", reg_idx);
        }
}

void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
                             struct ixgbe_ring *ring)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u64 rdba = ring->dma;
        u32 rxdctl;
        u8 reg_idx = ring->reg_idx;

        /* disable queue to avoid issues while updating state */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
        ixgbe_disable_rx_queue(adapter, ring);

        IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
        IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
        IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
                        ring->count * sizeof(union ixgbe_adv_rx_desc));
        IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
        ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);

        ixgbe_configure_srrctl(adapter, ring);
        ixgbe_configure_rscctl(adapter, ring);

        /* If operating in IOV mode set RLPML for X540 */
        if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
            hw->mac.type == ixgbe_mac_X540) {
                rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
                rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
                            ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
        }

        if (hw->mac.type == ixgbe_mac_82598EB) {
                /*
                 * enable cache line friendly hardware writes:
                 * PTHRESH=32 descriptors (half the internal cache),
                 * this also removes ugly rx_no_buffer_count increment
                 * HTHRESH=4 descriptors (to minimize latency on fetch)
                 * WTHRESH=8 burst writeback up to two cache lines
                 */
                rxdctl &= ~0x3FFFFF;
                rxdctl |= 0x080420;
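                /*
                 * 0x080420 decodes to the thresholds described above:
                 * PTHRESH = 0x20 (32) in bits 6:0, HTHRESH = 0x04 in
                 * bits 14:8 and WTHRESH = 0x08 in bits 22:16.
                 */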
        }

        /* enable receive descriptor ring */
        rxdctl |= IXGBE_RXDCTL_ENABLE;
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);

        ixgbe_rx_desc_queue_enable(adapter, ring);
        ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
}

static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int p;

        /* PSRTYPE must be initialized in non 82598 adapters */
        u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
                      IXGBE_PSRTYPE_UDPHDR |
                      IXGBE_PSRTYPE_IPV4HDR |
                      IXGBE_PSRTYPE_L2HDR |
                      IXGBE_PSRTYPE_IPV6HDR;

        if (hw->mac.type == ixgbe_mac_82598EB)
                return;

        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED)
                psrtype |= (adapter->num_rx_queues_per_pool << 29);

        for (p = 0; p < adapter->num_rx_pools; p++)
                IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(adapter->num_vfs + p),
                                psrtype);
}

static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 vt_reg_bits;
        u32 vmdctl;
        u32 gcr_ext;
        u32 reg_offset, vf_shift;

        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
                return;

        vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN | IXGBE_VT_CTL_REPLEN;
        vt_reg_bits |= (adapter->num_vfs << IXGBE_VT_CTL_POOL_SHIFT);
        IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);

        vf_shift = adapter->num_vfs % 32;
        reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
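        /*
         * VFRE/VFTE are per-pool enable bitmaps, 32 pools per register.
         * The PF uses the first pool after the VFs, so its bit lands at
         * position (num_vfs % 32) in register 0 or 1; the other register
         * of each pair is cleared so no VF pool is enabled here.
         */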

        /* Enable only the PF's pool for Tx/Rx */
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
        IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), 0);
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
        IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), 0);
        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

        /* Map PF MAC address in RAR Entry 0 to first pool following VFs */
        hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);

        /*
         * Set up VF register offsets for selected VT Mode,
         * i.e. 32 or 64 VFs for SR-IOV
         */
        gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
        gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
        gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
        IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);

        /* enable Tx loopback for VF/PF communication */
        IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

        /* Enable MAC Anti-Spoofing */
        hw->mac.ops.set_mac_anti_spoofing(hw,
                                          (adapter->antispoofing_enabled =
                                           (adapter->num_vfs != 0)),
                                          adapter->num_vfs);
}

static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        struct net_device *netdev = adapter->netdev;
        int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        int rx_buf_len;
        struct ixgbe_ring *rx_ring;