1 /*******************************************************************************
3 Intel(R) Gigabit Ethernet Linux driver
4 Copyright(c) 2007 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *******************************************************************************/
28 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/init.h>
31 #include <linux/vmalloc.h>
32 #include <linux/pagemap.h>
33 #include <linux/netdevice.h>
34 #include <linux/ipv6.h>
35 #include <net/checksum.h>
36 #include <net/ip6_checksum.h>
37 #include <linux/mii.h>
38 #include <linux/ethtool.h>
39 #include <linux/if_vlan.h>
40 #include <linux/pci.h>
41 #include <linux/delay.h>
42 #include <linux/interrupt.h>
43 #include <linux/if_ether.h>
45 #include <linux/dca.h>
49 #define DRV_VERSION "1.2.45-k2"
50 char igb_driver_name[] = "igb";
51 char igb_driver_version[] = DRV_VERSION;
52 static const char igb_driver_string[] =
53 "Intel(R) Gigabit Ethernet Network Driver";
54 static const char igb_copyright[] = "Copyright (c) 2008 Intel Corporation.";
56 static const struct e1000_info *igb_info_tbl[] = {
57 [board_82575] = &e1000_82575_info,
60 static struct pci_device_id igb_pci_tbl[] = {
61 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
62 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
63 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
68 /* required last entry */
72 MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
74 void igb_reset(struct igb_adapter *);
75 static int igb_setup_all_tx_resources(struct igb_adapter *);
76 static int igb_setup_all_rx_resources(struct igb_adapter *);
77 static void igb_free_all_tx_resources(struct igb_adapter *);
78 static void igb_free_all_rx_resources(struct igb_adapter *);
79 static void igb_free_tx_resources(struct igb_ring *);
80 static void igb_free_rx_resources(struct igb_ring *);
81 void igb_update_stats(struct igb_adapter *);
82 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
83 static void __devexit igb_remove(struct pci_dev *pdev);
84 static int igb_sw_init(struct igb_adapter *);
85 static int igb_open(struct net_device *);
86 static int igb_close(struct net_device *);
87 static void igb_configure_tx(struct igb_adapter *);
88 static void igb_configure_rx(struct igb_adapter *);
89 static void igb_setup_rctl(struct igb_adapter *);
90 static void igb_clean_all_tx_rings(struct igb_adapter *);
91 static void igb_clean_all_rx_rings(struct igb_adapter *);
92 static void igb_clean_tx_ring(struct igb_ring *);
93 static void igb_clean_rx_ring(struct igb_ring *);
94 static void igb_set_multi(struct net_device *);
95 static void igb_update_phy_info(unsigned long);
96 static void igb_watchdog(unsigned long);
97 static void igb_watchdog_task(struct work_struct *);
98 static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
100 static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
101 static struct net_device_stats *igb_get_stats(struct net_device *);
102 static int igb_change_mtu(struct net_device *, int);
103 static int igb_set_mac(struct net_device *, void *);
104 static irqreturn_t igb_intr(int irq, void *);
105 static irqreturn_t igb_intr_msi(int irq, void *);
106 static irqreturn_t igb_msix_other(int irq, void *);
107 static irqreturn_t igb_msix_rx(int irq, void *);
108 static irqreturn_t igb_msix_tx(int irq, void *);
109 static int igb_clean_rx_ring_msix(struct napi_struct *, int);
111 static void igb_update_rx_dca(struct igb_ring *);
112 static void igb_update_tx_dca(struct igb_ring *);
113 static void igb_setup_dca(struct igb_adapter *);
114 #endif /* CONFIG_DCA */
115 static bool igb_clean_tx_irq(struct igb_ring *);
116 static int igb_poll(struct napi_struct *, int);
117 static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
118 static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
119 #ifdef CONFIG_IGB_LRO
120 static int igb_get_skb_hdr(struct sk_buff *skb, void **, void **, u64 *, void *);
122 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
123 static void igb_tx_timeout(struct net_device *);
124 static void igb_reset_task(struct work_struct *);
125 static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
126 static void igb_vlan_rx_add_vid(struct net_device *, u16);
127 static void igb_vlan_rx_kill_vid(struct net_device *, u16);
128 static void igb_restore_vlan(struct igb_adapter *);
130 static int igb_suspend(struct pci_dev *, pm_message_t);
132 static int igb_resume(struct pci_dev *);
134 static void igb_shutdown(struct pci_dev *);
136 static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
137 static struct notifier_block dca_notifier = {
138 .notifier_call = igb_notify_dca,
144 #ifdef CONFIG_NET_POLL_CONTROLLER
145 /* for netdump / net console */
146 static void igb_netpoll(struct net_device *);
149 static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
150 pci_channel_state_t);
151 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
152 static void igb_io_resume(struct pci_dev *);
154 static struct pci_error_handlers igb_err_handler = {
155 .error_detected = igb_io_error_detected,
156 .slot_reset = igb_io_slot_reset,
157 .resume = igb_io_resume,
161 static struct pci_driver igb_driver = {
162 .name = igb_driver_name,
163 .id_table = igb_pci_tbl,
165 .remove = __devexit_p(igb_remove),
167 /* Power Managment Hooks */
168 .suspend = igb_suspend,
169 .resume = igb_resume,
171 .shutdown = igb_shutdown,
172 .err_handler = &igb_err_handler
175 static int global_quad_port_a; /* global quad port a indication */
177 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
178 MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
179 MODULE_LICENSE("GPL");
180 MODULE_VERSION(DRV_VERSION);
184 * igb_get_hw_dev_name - return device name string
185 * used by hardware layer to print debugging information
187 char *igb_get_hw_dev_name(struct e1000_hw *hw)
189 struct igb_adapter *adapter = hw->back;
190 return adapter->netdev->name;
195 * igb_init_module - Driver Registration Routine
197 * igb_init_module is the first routine called when the driver is
198 * loaded. All it does is register with the PCI subsystem.
200 static int __init igb_init_module(void)
203 printk(KERN_INFO "%s - version %s\n",
204 igb_driver_string, igb_driver_version);
206 printk(KERN_INFO "%s\n", igb_copyright);
208 global_quad_port_a = 0;
210 ret = pci_register_driver(&igb_driver);
212 dca_register_notify(&dca_notifier);
217 module_init(igb_init_module);
220 * igb_exit_module - Driver Exit Cleanup Routine
222 * igb_exit_module is called just before the driver is removed
225 static void __exit igb_exit_module(void)
228 dca_unregister_notify(&dca_notifier);
230 pci_unregister_driver(&igb_driver);
233 module_exit(igb_exit_module);
236 * igb_alloc_queues - Allocate memory for all rings
237 * @adapter: board private structure to initialize
239 * We allocate one ring per queue at run-time since we don't know the
240 * number of queues at compile-time.
242 static int igb_alloc_queues(struct igb_adapter *adapter)
246 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
247 sizeof(struct igb_ring), GFP_KERNEL);
248 if (!adapter->tx_ring)
251 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
252 sizeof(struct igb_ring), GFP_KERNEL);
253 if (!adapter->rx_ring) {
254 kfree(adapter->tx_ring);
258 adapter->rx_ring->buddy = adapter->tx_ring;
260 for (i = 0; i < adapter->num_tx_queues; i++) {
261 struct igb_ring *ring = &(adapter->tx_ring[i]);
262 ring->adapter = adapter;
263 ring->queue_index = i;
265 for (i = 0; i < adapter->num_rx_queues; i++) {
266 struct igb_ring *ring = &(adapter->rx_ring[i]);
267 ring->adapter = adapter;
268 ring->queue_index = i;
269 ring->itr_register = E1000_ITR;
271 /* set a default napi handler for each rx_ring */
272 netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
277 static void igb_free_queues(struct igb_adapter *adapter)
281 for (i = 0; i < adapter->num_rx_queues; i++)
282 netif_napi_del(&adapter->rx_ring[i].napi);
284 kfree(adapter->tx_ring);
285 kfree(adapter->rx_ring);
288 #define IGB_N0_QUEUE -1
289 static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
290 int tx_queue, int msix_vector)
293 struct e1000_hw *hw = &adapter->hw;
296 switch (hw->mac.type) {
298 /* The 82575 assigns vectors using a bitmask, which matches the
299 bitmask for the EICR/EIMS/EIMC registers. To assign one
300 or more queues to a vector, we write the appropriate bits
301 into the MSIXBM register for that vector. */
302 if (rx_queue > IGB_N0_QUEUE) {
303 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
304 adapter->rx_ring[rx_queue].eims_value = msixbm;
306 if (tx_queue > IGB_N0_QUEUE) {
307 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
308 adapter->tx_ring[tx_queue].eims_value =
309 E1000_EICR_TX_QUEUE0 << tx_queue;
311 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
314 /* Kawela uses a table-based method for assigning vectors.
315 Each queue has a single entry in the table to which we write
316 a vector number along with a "valid" bit. Sadly, the layout
317 of the table is somewhat counterintuitive. */
318 if (rx_queue > IGB_N0_QUEUE) {
319 index = (rx_queue & 0x7);
320 ivar = array_rd32(E1000_IVAR0, index);
322 /* vector goes into low byte of register */
323 ivar = ivar & 0xFFFFFF00;
324 ivar |= msix_vector | E1000_IVAR_VALID;
326 /* vector goes into third byte of register */
327 ivar = ivar & 0xFF00FFFF;
328 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
330 adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
331 array_wr32(E1000_IVAR0, index, ivar);
333 if (tx_queue > IGB_N0_QUEUE) {
334 index = (tx_queue & 0x7);
335 ivar = array_rd32(E1000_IVAR0, index);
337 /* vector goes into second byte of register */
338 ivar = ivar & 0xFFFF00FF;
339 ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
341 /* vector goes into high byte of register */
342 ivar = ivar & 0x00FFFFFF;
343 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
345 adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
346 array_wr32(E1000_IVAR0, index, ivar);
356 * igb_configure_msix - Configure MSI-X hardware
358 * igb_configure_msix sets up the hardware to properly
359 * generate MSI-X interrupts.
361 static void igb_configure_msix(struct igb_adapter *adapter)
365 struct e1000_hw *hw = &adapter->hw;
367 adapter->eims_enable_mask = 0;
368 if (hw->mac.type == e1000_82576)
369 /* Turn on MSI-X capability first, or our settings
370 * won't stick. And it will take days to debug. */
371 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
372 E1000_GPIE_PBA | E1000_GPIE_EIAME |
375 for (i = 0; i < adapter->num_tx_queues; i++) {
376 struct igb_ring *tx_ring = &adapter->tx_ring[i];
377 igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
378 adapter->eims_enable_mask |= tx_ring->eims_value;
379 if (tx_ring->itr_val)
380 writel(tx_ring->itr_val,
381 hw->hw_addr + tx_ring->itr_register);
383 writel(1, hw->hw_addr + tx_ring->itr_register);
386 for (i = 0; i < adapter->num_rx_queues; i++) {
387 struct igb_ring *rx_ring = &adapter->rx_ring[i];
389 igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
390 adapter->eims_enable_mask |= rx_ring->eims_value;
391 if (rx_ring->itr_val)
392 writel(rx_ring->itr_val,
393 hw->hw_addr + rx_ring->itr_register);
395 writel(1, hw->hw_addr + rx_ring->itr_register);
399 /* set vector for other causes, i.e. link changes */
400 switch (hw->mac.type) {
402 array_wr32(E1000_MSIXBM(0), vector++,
405 tmp = rd32(E1000_CTRL_EXT);
406 /* enable MSI-X PBA support*/
407 tmp |= E1000_CTRL_EXT_PBA_CLR;
409 /* Auto-Mask interrupts upon ICR read. */
410 tmp |= E1000_CTRL_EXT_EIAME;
411 tmp |= E1000_CTRL_EXT_IRCA;
413 wr32(E1000_CTRL_EXT, tmp);
414 adapter->eims_enable_mask |= E1000_EIMS_OTHER;
415 adapter->eims_other = E1000_EIMS_OTHER;
420 tmp = (vector++ | E1000_IVAR_VALID) << 8;
421 wr32(E1000_IVAR_MISC, tmp);
423 adapter->eims_enable_mask = (1 << (vector)) - 1;
424 adapter->eims_other = 1 << (vector - 1);
427 /* do nothing, since nothing else supports MSI-X */
429 } /* switch (hw->mac.type) */
434 * igb_request_msix - Initialize MSI-X interrupts
436 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
439 static int igb_request_msix(struct igb_adapter *adapter)
441 struct net_device *netdev = adapter->netdev;
442 int i, err = 0, vector = 0;
446 for (i = 0; i < adapter->num_tx_queues; i++) {
447 struct igb_ring *ring = &(adapter->tx_ring[i]);
448 sprintf(ring->name, "%s-tx%d", netdev->name, i);
449 err = request_irq(adapter->msix_entries[vector].vector,
450 &igb_msix_tx, 0, ring->name,
451 &(adapter->tx_ring[i]));
454 ring->itr_register = E1000_EITR(0) + (vector << 2);
455 ring->itr_val = 976; /* ~4000 ints/sec */
458 for (i = 0; i < adapter->num_rx_queues; i++) {
459 struct igb_ring *ring = &(adapter->rx_ring[i]);
460 if (strlen(netdev->name) < (IFNAMSIZ - 5))
461 sprintf(ring->name, "%s-rx%d", netdev->name, i);
463 memcpy(ring->name, netdev->name, IFNAMSIZ);
464 err = request_irq(adapter->msix_entries[vector].vector,
465 &igb_msix_rx, 0, ring->name,
466 &(adapter->rx_ring[i]));
469 ring->itr_register = E1000_EITR(0) + (vector << 2);
470 ring->itr_val = adapter->itr;
471 /* overwrite the poll routine for MSIX, we've already done
473 ring->napi.poll = &igb_clean_rx_ring_msix;
477 err = request_irq(adapter->msix_entries[vector].vector,
478 &igb_msix_other, 0, netdev->name, netdev);
482 igb_configure_msix(adapter);
488 static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
490 if (adapter->msix_entries) {
491 pci_disable_msix(adapter->pdev);
492 kfree(adapter->msix_entries);
493 adapter->msix_entries = NULL;
494 } else if (adapter->flags & IGB_FLAG_HAS_MSI)
495 pci_disable_msi(adapter->pdev);
501 * igb_set_interrupt_capability - set MSI or MSI-X if supported
503 * Attempt to configure interrupts using the best available
504 * capabilities of the hardware and kernel.
506 static void igb_set_interrupt_capability(struct igb_adapter *adapter)
511 numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
512 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
514 if (!adapter->msix_entries)
517 for (i = 0; i < numvecs; i++)
518 adapter->msix_entries[i].entry = i;
520 err = pci_enable_msix(adapter->pdev,
521 adapter->msix_entries,
526 igb_reset_interrupt_capability(adapter);
528 /* If we can't do MSI-X, try MSI */
530 adapter->num_rx_queues = 1;
531 adapter->num_tx_queues = 1;
532 if (!pci_enable_msi(adapter->pdev))
533 adapter->flags |= IGB_FLAG_HAS_MSI;
535 /* Notify the stack of the (possibly) reduced Tx Queue count. */
536 adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
541 * igb_request_irq - initialize interrupts
543 * Attempts to configure interrupts using the best available
544 * capabilities of the hardware and kernel.
546 static int igb_request_irq(struct igb_adapter *adapter)
548 struct net_device *netdev = adapter->netdev;
549 struct e1000_hw *hw = &adapter->hw;
552 if (adapter->msix_entries) {
553 err = igb_request_msix(adapter);
556 /* fall back to MSI */
557 igb_reset_interrupt_capability(adapter);
558 if (!pci_enable_msi(adapter->pdev))
559 adapter->flags |= IGB_FLAG_HAS_MSI;
560 igb_free_all_tx_resources(adapter);
561 igb_free_all_rx_resources(adapter);
562 adapter->num_rx_queues = 1;
563 igb_alloc_queues(adapter);
565 switch (hw->mac.type) {
567 wr32(E1000_MSIXBM(0),
568 (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
571 wr32(E1000_IVAR0, E1000_IVAR_VALID);
578 if (adapter->flags & IGB_FLAG_HAS_MSI) {
579 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
580 netdev->name, netdev);
583 /* fall back to legacy interrupts */
584 igb_reset_interrupt_capability(adapter);
585 adapter->flags &= ~IGB_FLAG_HAS_MSI;
588 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
589 netdev->name, netdev);
592 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
599 static void igb_free_irq(struct igb_adapter *adapter)
601 struct net_device *netdev = adapter->netdev;
603 if (adapter->msix_entries) {
606 for (i = 0; i < adapter->num_tx_queues; i++)
607 free_irq(adapter->msix_entries[vector++].vector,
608 &(adapter->tx_ring[i]));
609 for (i = 0; i < adapter->num_rx_queues; i++)
610 free_irq(adapter->msix_entries[vector++].vector,
611 &(adapter->rx_ring[i]));
613 free_irq(adapter->msix_entries[vector++].vector, netdev);
617 free_irq(adapter->pdev->irq, netdev);
621 * igb_irq_disable - Mask off interrupt generation on the NIC
622 * @adapter: board private structure
624 static void igb_irq_disable(struct igb_adapter *adapter)
626 struct e1000_hw *hw = &adapter->hw;
628 if (adapter->msix_entries) {
630 wr32(E1000_EIMC, ~0);
637 synchronize_irq(adapter->pdev->irq);
641 * igb_irq_enable - Enable default interrupt generation settings
642 * @adapter: board private structure
644 static void igb_irq_enable(struct igb_adapter *adapter)
646 struct e1000_hw *hw = &adapter->hw;
648 if (adapter->msix_entries) {
649 wr32(E1000_EIAC, adapter->eims_enable_mask);
650 wr32(E1000_EIAM, adapter->eims_enable_mask);
651 wr32(E1000_EIMS, adapter->eims_enable_mask);
652 wr32(E1000_IMS, E1000_IMS_LSC);
654 wr32(E1000_IMS, IMS_ENABLE_MASK);
655 wr32(E1000_IAM, IMS_ENABLE_MASK);
659 static void igb_update_mng_vlan(struct igb_adapter *adapter)
661 struct net_device *netdev = adapter->netdev;
662 u16 vid = adapter->hw.mng_cookie.vlan_id;
663 u16 old_vid = adapter->mng_vlan_id;
664 if (adapter->vlgrp) {
665 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
666 if (adapter->hw.mng_cookie.status &
667 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
668 igb_vlan_rx_add_vid(netdev, vid);
669 adapter->mng_vlan_id = vid;
671 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
673 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
675 !vlan_group_get_device(adapter->vlgrp, old_vid))
676 igb_vlan_rx_kill_vid(netdev, old_vid);
678 adapter->mng_vlan_id = vid;
683 * igb_release_hw_control - release control of the h/w to f/w
684 * @adapter: address of board private structure
686 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
687 * For ASF and Pass Through versions of f/w this means that the
688 * driver is no longer loaded.
691 static void igb_release_hw_control(struct igb_adapter *adapter)
693 struct e1000_hw *hw = &adapter->hw;
696 /* Let firmware take over control of h/w */
697 ctrl_ext = rd32(E1000_CTRL_EXT);
699 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
704 * igb_get_hw_control - get control of the h/w from f/w
705 * @adapter: address of board private structure
707 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
708 * For ASF and Pass Through versions of f/w this means that
709 * the driver is loaded.
712 static void igb_get_hw_control(struct igb_adapter *adapter)
714 struct e1000_hw *hw = &adapter->hw;
717 /* Let firmware know the driver has taken over */
718 ctrl_ext = rd32(E1000_CTRL_EXT);
720 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
723 static void igb_init_manageability(struct igb_adapter *adapter)
725 struct e1000_hw *hw = &adapter->hw;
727 if (adapter->en_mng_pt) {
728 u32 manc2h = rd32(E1000_MANC2H);
729 u32 manc = rd32(E1000_MANC);
731 /* enable receiving management packets to the host */
732 /* this will probably generate destination unreachable messages
733 * from the host OS, but the packets will be handled on SMBUS */
734 manc |= E1000_MANC_EN_MNG2HOST;
735 #define E1000_MNG2HOST_PORT_623 (1 << 5)
736 #define E1000_MNG2HOST_PORT_664 (1 << 6)
737 manc2h |= E1000_MNG2HOST_PORT_623;
738 manc2h |= E1000_MNG2HOST_PORT_664;
739 wr32(E1000_MANC2H, manc2h);
741 wr32(E1000_MANC, manc);
746 * igb_configure - configure the hardware for RX and TX
747 * @adapter: private board structure
749 static void igb_configure(struct igb_adapter *adapter)
751 struct net_device *netdev = adapter->netdev;
754 igb_get_hw_control(adapter);
755 igb_set_multi(netdev);
757 igb_restore_vlan(adapter);
758 igb_init_manageability(adapter);
760 igb_configure_tx(adapter);
761 igb_setup_rctl(adapter);
762 igb_configure_rx(adapter);
764 igb_rx_fifo_flush_82575(&adapter->hw);
766 /* call IGB_DESC_UNUSED which always leaves
767 * at least 1 descriptor unused to make sure
768 * next_to_use != next_to_clean */
769 for (i = 0; i < adapter->num_rx_queues; i++) {
770 struct igb_ring *ring = &adapter->rx_ring[i];
771 igb_alloc_rx_buffers_adv(ring, IGB_DESC_UNUSED(ring));
775 adapter->tx_queue_len = netdev->tx_queue_len;
780 * igb_up - Open the interface and prepare it to handle traffic
781 * @adapter: board private structure
784 int igb_up(struct igb_adapter *adapter)
786 struct e1000_hw *hw = &adapter->hw;
789 /* hardware has been reset, we need to reload some things */
790 igb_configure(adapter);
792 clear_bit(__IGB_DOWN, &adapter->state);
794 for (i = 0; i < adapter->num_rx_queues; i++)
795 napi_enable(&adapter->rx_ring[i].napi);
796 if (adapter->msix_entries)
797 igb_configure_msix(adapter);
799 /* Clear any pending interrupts. */
801 igb_irq_enable(adapter);
803 /* Fire a link change interrupt to start the watchdog. */
804 wr32(E1000_ICS, E1000_ICS_LSC);
808 void igb_down(struct igb_adapter *adapter)
810 struct e1000_hw *hw = &adapter->hw;
811 struct net_device *netdev = adapter->netdev;
815 /* signal that we're down so the interrupt handler does not
816 * reschedule our watchdog timer */
817 set_bit(__IGB_DOWN, &adapter->state);
819 /* disable receives in the hardware */
820 rctl = rd32(E1000_RCTL);
821 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
822 /* flush and sleep below */
824 netif_stop_queue(netdev);
825 for (i = 0; i < adapter->num_tx_queues; i++)
826 netif_stop_subqueue(netdev, i);
828 /* disable transmits in the hardware */
829 tctl = rd32(E1000_TCTL);
830 tctl &= ~E1000_TCTL_EN;
831 wr32(E1000_TCTL, tctl);
832 /* flush both disables and wait for them to finish */
836 for (i = 0; i < adapter->num_rx_queues; i++)
837 napi_disable(&adapter->rx_ring[i].napi);
839 igb_irq_disable(adapter);
841 del_timer_sync(&adapter->watchdog_timer);
842 del_timer_sync(&adapter->phy_info_timer);
844 netdev->tx_queue_len = adapter->tx_queue_len;
845 netif_carrier_off(netdev);
846 adapter->link_speed = 0;
847 adapter->link_duplex = 0;
849 if (!pci_channel_offline(adapter->pdev))
851 igb_clean_all_tx_rings(adapter);
852 igb_clean_all_rx_rings(adapter);
855 void igb_reinit_locked(struct igb_adapter *adapter)
857 WARN_ON(in_interrupt());
858 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
862 clear_bit(__IGB_RESETTING, &adapter->state);
865 void igb_reset(struct igb_adapter *adapter)
867 struct e1000_hw *hw = &adapter->hw;
868 struct e1000_mac_info *mac = &hw->mac;
869 struct e1000_fc_info *fc = &hw->fc;
870 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
873 /* Repartition Pba for greater than 9k mtu
874 * To take effect CTRL.RST is required.
876 if (mac->type != e1000_82576) {
883 if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
884 (mac->type < e1000_82576)) {
885 /* adjust PBA for jumbo frames */
886 wr32(E1000_PBA, pba);
888 /* To maintain wire speed transmits, the Tx FIFO should be
889 * large enough to accommodate two full transmit packets,
890 * rounded up to the next 1KB and expressed in KB. Likewise,
891 * the Rx FIFO should be large enough to accommodate at least
892 * one full receive packet and is similarly rounded up and
893 * expressed in KB. */
894 pba = rd32(E1000_PBA);
895 /* upper 16 bits has Tx packet buffer allocation size in KB */
896 tx_space = pba >> 16;
897 /* lower 16 bits has Rx packet buffer allocation size in KB */
899 /* the tx fifo also stores 16 bytes of information about the tx
900 * but don't include ethernet FCS because hardware appends it */
901 min_tx_space = (adapter->max_frame_size +
902 sizeof(struct e1000_tx_desc) -
904 min_tx_space = ALIGN(min_tx_space, 1024);
906 /* software strips receive CRC, so leave room for it */
907 min_rx_space = adapter->max_frame_size;
908 min_rx_space = ALIGN(min_rx_space, 1024);
911 /* If current Tx allocation is less than the min Tx FIFO size,
912 * and the min Tx FIFO size is less than the current Rx FIFO
913 * allocation, take space away from current Rx allocation */
914 if (tx_space < min_tx_space &&
915 ((min_tx_space - tx_space) < pba)) {
916 pba = pba - (min_tx_space - tx_space);
918 /* if short on rx space, rx wins and must trump tx
920 if (pba < min_rx_space)
923 wr32(E1000_PBA, pba);
926 /* flow control settings */
927 /* The high water mark must be low enough to fit one full frame
928 * (or the size used for early receive) above it in the Rx FIFO.
929 * Set it to the lower of:
930 * - 90% of the Rx FIFO size, or
931 * - the full Rx FIFO size minus one full frame */
932 hwm = min(((pba << 10) * 9 / 10),
933 ((pba << 10) - 2 * adapter->max_frame_size));
935 if (mac->type < e1000_82576) {
936 fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
937 fc->low_water = fc->high_water - 8;
939 fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
940 fc->low_water = fc->high_water - 16;
942 fc->pause_time = 0xFFFF;
944 fc->type = fc->original_type;
946 /* Allow time for pending master requests to run */
947 adapter->hw.mac.ops.reset_hw(&adapter->hw);
950 if (adapter->hw.mac.ops.init_hw(&adapter->hw))
951 dev_err(&adapter->pdev->dev, "Hardware Error\n");
953 igb_update_mng_vlan(adapter);
955 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
956 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
958 igb_reset_adaptive(&adapter->hw);
959 if (adapter->hw.phy.ops.get_phy_info)
960 adapter->hw.phy.ops.get_phy_info(&adapter->hw);
964 * igb_is_need_ioport - determine if an adapter needs ioport resources or not
965 * @pdev: PCI device information struct
967 * Returns true if an adapter needs ioport resources
969 static int igb_is_need_ioport(struct pci_dev *pdev)
971 switch (pdev->device) {
972 /* Currently there are no adapters that need ioport resources */
979 * igb_probe - Device Initialization Routine
980 * @pdev: PCI device information struct
981 * @ent: entry in igb_pci_tbl
983 * Returns 0 on success, negative on failure
985 * igb_probe initializes an adapter identified by a pci_dev structure.
986 * The OS initialization, configuring of the adapter private structure,
987 * and a hardware reset occur.
989 static int __devinit igb_probe(struct pci_dev *pdev,
990 const struct pci_device_id *ent)
992 struct net_device *netdev;
993 struct igb_adapter *adapter;
995 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
996 unsigned long mmio_start, mmio_len;
997 int i, err, pci_using_dac;
999 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1001 int bars, need_ioport;
1003 /* do not allocate ioport bars when not needed */
1004 need_ioport = igb_is_need_ioport(pdev);
1006 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
1007 err = pci_enable_device(pdev);
1009 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1010 err = pci_enable_device_mem(pdev);
1016 err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
1018 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1022 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1024 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1026 dev_err(&pdev->dev, "No usable DMA "
1027 "configuration, aborting\n");
1033 err = pci_request_selected_regions(pdev, bars, igb_driver_name);
1037 pci_set_master(pdev);
1038 pci_save_state(pdev);
1041 netdev = alloc_etherdev_mq(sizeof(struct igb_adapter), IGB_MAX_TX_QUEUES);
1043 goto err_alloc_etherdev;
1045 SET_NETDEV_DEV(netdev, &pdev->dev);
1047 pci_set_drvdata(pdev, netdev);
1048 adapter = netdev_priv(netdev);
1049 adapter->netdev = netdev;
1050 adapter->pdev = pdev;
1053 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;
1054 adapter->bars = bars;
1055 adapter->need_ioport = need_ioport;
1057 mmio_start = pci_resource_start(pdev, 0);
1058 mmio_len = pci_resource_len(pdev, 0);
1061 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
1062 if (!adapter->hw.hw_addr)
1065 netdev->open = &igb_open;
1066 netdev->stop = &igb_close;
1067 netdev->get_stats = &igb_get_stats;
1068 netdev->set_multicast_list = &igb_set_multi;
1069 netdev->set_mac_address = &igb_set_mac;
1070 netdev->change_mtu = &igb_change_mtu;
1071 netdev->do_ioctl = &igb_ioctl;
1072 igb_set_ethtool_ops(netdev);
1073 netdev->tx_timeout = &igb_tx_timeout;
1074 netdev->watchdog_timeo = 5 * HZ;
1075 netdev->vlan_rx_register = igb_vlan_rx_register;
1076 netdev->vlan_rx_add_vid = igb_vlan_rx_add_vid;
1077 netdev->vlan_rx_kill_vid = igb_vlan_rx_kill_vid;
1078 #ifdef CONFIG_NET_POLL_CONTROLLER
1079 netdev->poll_controller = igb_netpoll;
1081 netdev->hard_start_xmit = &igb_xmit_frame_adv;
1083 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1085 netdev->mem_start = mmio_start;
1086 netdev->mem_end = mmio_start + mmio_len;
1088 /* PCI config space info */
1089 hw->vendor_id = pdev->vendor;
1090 hw->device_id = pdev->device;
1091 hw->revision_id = pdev->revision;
1092 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1093 hw->subsystem_device_id = pdev->subsystem_device;
1095 /* setup the private structure */
1097 /* Copy the default MAC, PHY and NVM function pointers */
1098 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1099 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
1100 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
1101 /* Initialize skew-specific constants */
1102 err = ei->get_invariants(hw);
1106 err = igb_sw_init(adapter);
1110 igb_get_bus_info_pcie(hw);
1113 switch (hw->mac.type) {
1116 adapter->flags |= IGB_FLAG_HAS_DCA;
1117 adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
1123 hw->phy.autoneg_wait_to_complete = false;
1124 hw->mac.adaptive_ifs = true;
1126 /* Copper options */
1127 if (hw->phy.media_type == e1000_media_type_copper) {
1128 hw->phy.mdix = AUTO_ALL_MODES;
1129 hw->phy.disable_polarity_correction = false;
1130 hw->phy.ms_type = e1000_ms_hw_default;
1133 if (igb_check_reset_block(hw))
1134 dev_info(&pdev->dev,
1135 "PHY reset is blocked due to SOL/IDER session.\n");
1137 netdev->features = NETIF_F_SG |
1139 NETIF_F_HW_VLAN_TX |
1140 NETIF_F_HW_VLAN_RX |
1141 NETIF_F_HW_VLAN_FILTER;
1143 netdev->features |= NETIF_F_TSO;
1144 netdev->features |= NETIF_F_TSO6;
1146 #ifdef CONFIG_IGB_LRO
1147 netdev->features |= NETIF_F_LRO;
1150 netdev->vlan_features |= NETIF_F_TSO;
1151 netdev->vlan_features |= NETIF_F_TSO6;
1152 netdev->vlan_features |= NETIF_F_HW_CSUM;
1153 netdev->vlan_features |= NETIF_F_SG;
1156 netdev->features |= NETIF_F_HIGHDMA;
1158 netdev->features |= NETIF_F_MULTI_QUEUE;
1160 netdev->features |= NETIF_F_LLTX;
1161 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);
1163 /* before reading the NVM, reset the controller to put the device in a
1164 * known good starting state */
1165 hw->mac.ops.reset_hw(hw);
1167 /* make sure the NVM is good */
1168 if (igb_validate_nvm_checksum(hw) < 0) {
1169 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
1174 /* copy the MAC address out of the NVM */
1175 if (hw->mac.ops.read_mac_addr(hw))
1176 dev_err(&pdev->dev, "NVM Read Error\n");
1178 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
1179 memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
1181 if (!is_valid_ether_addr(netdev->perm_addr)) {
1182 dev_err(&pdev->dev, "Invalid MAC Address\n");
1187 init_timer(&adapter->watchdog_timer);
1188 adapter->watchdog_timer.function = &igb_watchdog;
1189 adapter->watchdog_timer.data = (unsigned long) adapter;
1191 init_timer(&adapter->phy_info_timer);
1192 adapter->phy_info_timer.function = &igb_update_phy_info;
1193 adapter->phy_info_timer.data = (unsigned long) adapter;
1195 INIT_WORK(&adapter->reset_task, igb_reset_task);
1196 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
1198 /* Initialize link & ring properties that are user-changeable */
1199 adapter->tx_ring->count = 256;
1200 for (i = 0; i < adapter->num_tx_queues; i++)
1201 adapter->tx_ring[i].count = adapter->tx_ring->count;
1202 adapter->rx_ring->count = 256;
1203 for (i = 0; i < adapter->num_rx_queues; i++)
1204 adapter->rx_ring[i].count = adapter->rx_ring->count;
1206 adapter->fc_autoneg = true;
1207 hw->mac.autoneg = true;
1208 hw->phy.autoneg_advertised = 0x2f;
1210 hw->fc.original_type = e1000_fc_default;
1211 hw->fc.type = e1000_fc_default;
1213 adapter->itr_setting = 3;
1214 adapter->itr = IGB_START_ITR;
1216 igb_validate_mdi_setting(hw);
1218 adapter->rx_csum = 1;
1220 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
1221 * enable the ACPI Magic Packet filter
1224 if (hw->bus.func == 0 ||
1225 hw->device_id == E1000_DEV_ID_82575EB_COPPER)
1226 hw->nvm.ops.read_nvm(hw, NVM_INIT_CONTROL3_PORT_A, 1,
1229 if (eeprom_data & eeprom_apme_mask)
1230 adapter->eeprom_wol |= E1000_WUFC_MAG;
1232 /* now that we have the eeprom settings, apply the special cases where
1233 * the eeprom may be wrong or the board simply won't support wake on
1234 * lan on a particular port */
1235 switch (pdev->device) {
1236 case E1000_DEV_ID_82575GB_QUAD_COPPER:
1237 adapter->eeprom_wol = 0;
1239 case E1000_DEV_ID_82575EB_FIBER_SERDES:
1240 case E1000_DEV_ID_82576_FIBER:
1241 case E1000_DEV_ID_82576_SERDES:
1242 /* Wake events only supported on port A for dual fiber
1243 * regardless of eeprom setting */
1244 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
1245 adapter->eeprom_wol = 0;
1247 case E1000_DEV_ID_82576_QUAD_COPPER:
1248 /* if quad port adapter, disable WoL on all but port A */
1249 if (global_quad_port_a != 0)
1250 adapter->eeprom_wol = 0;
1252 adapter->flags |= IGB_FLAG_QUAD_PORT_A;
1253 /* Reset for multiple quad port adapters */
1254 if (++global_quad_port_a == 4)
1255 global_quad_port_a = 0;
1259 /* initialize the wol settings based on the eeprom settings */
1260 adapter->wol = adapter->eeprom_wol;
1262 /* reset the hardware with the new settings */
1265 /* let the f/w know that the h/w is now under the control of the
1267 igb_get_hw_control(adapter);
1269 /* tell the stack to leave us alone until igb_open() is called */
1270 netif_carrier_off(netdev);
1271 netif_stop_queue(netdev);
1272 for (i = 0; i < adapter->num_tx_queues; i++)
1273 netif_stop_subqueue(netdev, i);
1275 strcpy(netdev->name, "eth%d");
1276 err = register_netdev(netdev);
1281 if ((adapter->flags & IGB_FLAG_HAS_DCA) &&
1282 (dca_add_requester(&pdev->dev) == 0)) {
1283 adapter->flags |= IGB_FLAG_DCA_ENABLED;
1284 dev_info(&pdev->dev, "DCA enabled\n");
1285 /* Always use CB2 mode, difference is masked
1286 * in the CB driver. */
1287 wr32(E1000_DCA_CTRL, 2);
1288 igb_setup_dca(adapter);
1292 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1293 /* print bus type/speed/width info */
1294 dev_info(&pdev->dev,
1295 "%s: (PCIe:%s:%s) %02x:%02x:%02x:%02x:%02x:%02x\n",
1297 ((hw->bus.speed == e1000_bus_speed_2500)
1298 ? "2.5Gb/s" : "unknown"),
1299 ((hw->bus.width == e1000_bus_width_pcie_x4)
1300 ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1)
1301 ? "Width x1" : "unknown"),
1302 netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
1303 netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]);
1305 igb_read_part_num(hw, &part_num);
1306 dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
1307 (part_num >> 8), (part_num & 0xff));
1309 dev_info(&pdev->dev,
1310 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
1311 adapter->msix_entries ? "MSI-X" :
1312 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
1313 adapter->num_rx_queues, adapter->num_tx_queues);
1318 igb_release_hw_control(adapter);
1320 if (!igb_check_reset_block(hw))
1321 hw->phy.ops.reset_phy(hw);
1323 if (hw->flash_address)
1324 iounmap(hw->flash_address);
1326 igb_remove_device(hw);
1327 igb_free_queues(adapter);
1330 iounmap(hw->hw_addr);
1332 free_netdev(netdev);
1334 pci_release_selected_regions(pdev, bars);
1337 pci_disable_device(pdev);
1342 * igb_remove - Device Removal Routine
1343 * @pdev: PCI device information struct
1345 * igb_remove is called by the PCI subsystem to alert the driver
1346 * that it should release a PCI device. The could be caused by a
1347 * Hot-Plug event, or because the driver is going to be removed from
1350 static void __devexit igb_remove(struct pci_dev *pdev)
1352 struct net_device *netdev = pci_get_drvdata(pdev);
1353 struct igb_adapter *adapter = netdev_priv(netdev);
1355 struct e1000_hw *hw = &adapter->hw;
1358 /* flush_scheduled work may reschedule our watchdog task, so
1359 * explicitly disable watchdog tasks from being rescheduled */
1360 set_bit(__IGB_DOWN, &adapter->state);
1361 del_timer_sync(&adapter->watchdog_timer);
1362 del_timer_sync(&adapter->phy_info_timer);
1364 flush_scheduled_work();
1367 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
1368 dev_info(&pdev->dev, "DCA disabled\n");
1369 dca_remove_requester(&pdev->dev);
1370 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
1371 wr32(E1000_DCA_CTRL, 1);
1375 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1376 * would have already happened in close and is redundant. */
1377 igb_release_hw_control(adapter);
1379 unregister_netdev(netdev);
1381 if (!igb_check_reset_block(&adapter->hw))
1382 adapter->hw.phy.ops.reset_phy(&adapter->hw);
1384 igb_remove_device(&adapter->hw);
1385 igb_reset_interrupt_capability(adapter);
1387 igb_free_queues(adapter);
1389 iounmap(adapter->hw.hw_addr);
1390 if (adapter->hw.flash_address)
1391 iounmap(adapter->hw.flash_address);
1392 pci_release_selected_regions(pdev, adapter->bars);
1394 free_netdev(netdev);
1396 pci_disable_device(pdev);
1400 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1401 * @adapter: board private structure to initialize
1403 * igb_sw_init initializes the Adapter private data structure.
1404 * Fields are initialized based on PCI device information and
1405 * OS network device settings (MTU size).
1407 static int __devinit igb_sw_init(struct igb_adapter *adapter)
1409 struct e1000_hw *hw = &adapter->hw;
1410 struct net_device *netdev = adapter->netdev;
1411 struct pci_dev *pdev = adapter->pdev;
1413 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
1415 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1416 adapter->rx_ps_hdr_size = 0; /* disable packet split */
1417 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1418 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1420 /* Number of supported queues. */
1421 /* Having more queues than CPUs doesn't make sense. */
1422 adapter->num_rx_queues = min((u32)IGB_MAX_RX_QUEUES, (u32)num_online_cpus());
1423 adapter->num_tx_queues = min(IGB_MAX_TX_QUEUES, num_online_cpus());
1425 /* This call may decrease the number of queues depending on
1426 * interrupt mode. */
1427 igb_set_interrupt_capability(adapter);
1429 if (igb_alloc_queues(adapter)) {
1430 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1434 /* Explicitly disable IRQ since the NIC can be in any state. */
1435 igb_irq_disable(adapter);
1437 set_bit(__IGB_DOWN, &adapter->state);
1442 * igb_open - Called when a network interface is made active
1443 * @netdev: network interface device structure
1445 * Returns 0 on success, negative value on failure
1447 * The open entry point is called when a network interface is made
1448 * active by the system (IFF_UP). At this point all resources needed
1449 * for transmit and receive operations are allocated, the interrupt
1450 * handler is registered with the OS, the watchdog timer is started,
1451 * and the stack is notified that the interface is ready.
1453 static int igb_open(struct net_device *netdev)
1455 struct igb_adapter *adapter = netdev_priv(netdev);
1456 struct e1000_hw *hw = &adapter->hw;
1460 /* disallow open during test */
1461 if (test_bit(__IGB_TESTING, &adapter->state))
1464 /* allocate transmit descriptors */
1465 err = igb_setup_all_tx_resources(adapter);
1469 /* allocate receive descriptors */
1470 err = igb_setup_all_rx_resources(adapter);
1474 /* e1000_power_up_phy(adapter); */
1476 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1477 if ((adapter->hw.mng_cookie.status &
1478 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
1479 igb_update_mng_vlan(adapter);
1481 /* before we allocate an interrupt, we must be ready to handle it.
1482 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1483 * as soon as we call pci_request_irq, so we have to setup our
1484 * clean_rx handler before we do so. */
1485 igb_configure(adapter);
1487 err = igb_request_irq(adapter);
1491 /* From here on the code is the same as igb_up() */
1492 clear_bit(__IGB_DOWN, &adapter->state);
1494 for (i = 0; i < adapter->num_rx_queues; i++)
1495 napi_enable(&adapter->rx_ring[i].napi);
1497 /* Clear any pending interrupts. */
1500 igb_irq_enable(adapter);
1502 /* Fire a link status change interrupt to start the watchdog. */
1503 wr32(E1000_ICS, E1000_ICS_LSC);
1508 igb_release_hw_control(adapter);
1509 /* e1000_power_down_phy(adapter); */
1510 igb_free_all_rx_resources(adapter);
1512 igb_free_all_tx_resources(adapter);
1520 * igb_close - Disables a network interface
1521 * @netdev: network interface device structure
1523 * Returns 0, this is not allowed to fail
1525 * The close entry point is called when an interface is de-activated
1526 * by the OS. The hardware is still under the driver's control, but
1527 * needs to be disabled. A global MAC reset is issued to stop the
1528 * hardware, and all transmit and receive resources are freed.
1530 static int igb_close(struct net_device *netdev)
1532 struct igb_adapter *adapter = netdev_priv(netdev);
1534 WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
1537 igb_free_irq(adapter);
1539 igb_free_all_tx_resources(adapter);
1540 igb_free_all_rx_resources(adapter);
1542 /* kill manageability vlan ID if supported, but not if a vlan with
1543 * the same ID is registered on the host OS (let 8021q kill it) */
1544 if ((adapter->hw.mng_cookie.status &
1545 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1547 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
1548 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1554 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
1555 * @adapter: board private structure
1556 * @tx_ring: tx descriptor ring (for a specific queue) to setup
1558 * Return 0 on success, negative on failure
1561 int igb_setup_tx_resources(struct igb_adapter *adapter,
1562 struct igb_ring *tx_ring)
1564 struct pci_dev *pdev = adapter->pdev;
1567 size = sizeof(struct igb_buffer) * tx_ring->count;
1568 tx_ring->buffer_info = vmalloc(size);
1569 if (!tx_ring->buffer_info)
1571 memset(tx_ring->buffer_info, 0, size);
1573 /* round up to nearest 4K */
1574 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc)
1576 tx_ring->size = ALIGN(tx_ring->size, 4096);
1578 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1584 tx_ring->adapter = adapter;
1585 tx_ring->next_to_use = 0;
1586 tx_ring->next_to_clean = 0;
1590 vfree(tx_ring->buffer_info);
1591 dev_err(&adapter->pdev->dev,
1592 "Unable to allocate memory for the transmit descriptor ring\n");
1597 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
1598 * (Descriptors) for all queues
1599 * @adapter: board private structure
1601 * Return 0 on success, negative on failure
1603 static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1608 for (i = 0; i < adapter->num_tx_queues; i++) {
1609 err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1611 dev_err(&adapter->pdev->dev,
1612 "Allocation for Tx Queue %u failed\n", i);
1613 for (i--; i >= 0; i--)
1614 igb_free_tx_resources(&adapter->tx_ring[i]);
1619 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
1620 r_idx = i % adapter->num_tx_queues;
1621 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
1627 * igb_configure_tx - Configure transmit Unit after Reset
1628 * @adapter: board private structure
1630 * Configure the Tx unit of the MAC after a reset.
1632 static void igb_configure_tx(struct igb_adapter *adapter)
1635 struct e1000_hw *hw = &adapter->hw;
1640 for (i = 0; i < adapter->num_tx_queues; i++) {
1641 struct igb_ring *ring = &(adapter->tx_ring[i]);
1643 wr32(E1000_TDLEN(i),
1644 ring->count * sizeof(struct e1000_tx_desc));
1646 wr32(E1000_TDBAL(i),
1647 tdba & 0x00000000ffffffffULL);
1648 wr32(E1000_TDBAH(i), tdba >> 32);
1650 tdwba = ring->dma + ring->count * sizeof(struct e1000_tx_desc);
1651 tdwba |= 1; /* enable head wb */
1652 wr32(E1000_TDWBAL(i),
1653 tdwba & 0x00000000ffffffffULL);
1654 wr32(E1000_TDWBAH(i), tdwba >> 32);
1656 ring->head = E1000_TDH(i);
1657 ring->tail = E1000_TDT(i);
1658 writel(0, hw->hw_addr + ring->tail);
1659 writel(0, hw->hw_addr + ring->head);
1660 txdctl = rd32(E1000_TXDCTL(i));
1661 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1662 wr32(E1000_TXDCTL(i), txdctl);
1664 /* Turn off Relaxed Ordering on head write-backs. The
1665 * writebacks MUST be delivered in order or it will
1666 * completely screw up our bookeeping.
1668 txctrl = rd32(E1000_DCA_TXCTRL(i));
1669 txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1670 wr32(E1000_DCA_TXCTRL(i), txctrl);
1675 /* Use the default values for the Tx Inter Packet Gap (IPG) timer */
1677 /* Program the Transmit Control Register */
1679 tctl = rd32(E1000_TCTL);
1680 tctl &= ~E1000_TCTL_CT;
1681 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1682 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1684 igb_config_collision_dist(hw);
1686 /* Setup Transmit Descriptor Settings for eop descriptor */
1687 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
1689 /* Enable transmits */
1690 tctl |= E1000_TCTL_EN;
1692 wr32(E1000_TCTL, tctl);
1696 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
1697 * @adapter: board private structure
1698 * @rx_ring: rx descriptor ring (for a specific queue) to setup
1700 * Returns 0 on success, negative on failure
1703 int igb_setup_rx_resources(struct igb_adapter *adapter,
1704 struct igb_ring *rx_ring)
1706 struct pci_dev *pdev = adapter->pdev;
1709 #ifdef CONFIG_IGB_LRO
1710 size = sizeof(struct net_lro_desc) * MAX_LRO_DESCRIPTORS;
1711 rx_ring->lro_mgr.lro_arr = vmalloc(size);
1712 if (!rx_ring->lro_mgr.lro_arr)
1714 memset(rx_ring->lro_mgr.lro_arr, 0, size);
1717 size = sizeof(struct igb_buffer) * rx_ring->count;
1718 rx_ring->buffer_info = vmalloc(size);
1719 if (!rx_ring->buffer_info)
1721 memset(rx_ring->buffer_info, 0, size);
1723 desc_len = sizeof(union e1000_adv_rx_desc);
1725 /* Round up to nearest 4K */
1726 rx_ring->size = rx_ring->count * desc_len;
1727 rx_ring->size = ALIGN(rx_ring->size, 4096);
1729 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
1735 rx_ring->next_to_clean = 0;
1736 rx_ring->next_to_use = 0;
1738 rx_ring->adapter = adapter;
1743 #ifdef CONFIG_IGB_LRO
1744 vfree(rx_ring->lro_mgr.lro_arr);
1745 rx_ring->lro_mgr.lro_arr = NULL;
1747 vfree(rx_ring->buffer_info);
1748 dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
1749 "the receive descriptor ring\n");
1754 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
1755 * (Descriptors) for all queues
1756 * @adapter: board private structure
1758 * Return 0 on success, negative on failure
1760 static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
1764 for (i = 0; i < adapter->num_rx_queues; i++) {
1765 err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1767 dev_err(&adapter->pdev->dev,
1768 "Allocation for Rx Queue %u failed\n", i);
1769 for (i--; i >= 0; i--)
1770 igb_free_rx_resources(&adapter->rx_ring[i]);
1779 * igb_setup_rctl - configure the receive control registers
1780 * @adapter: Board private structure
1782 static void igb_setup_rctl(struct igb_adapter *adapter)
1784 struct e1000_hw *hw = &adapter->hw;
1789 rctl = rd32(E1000_RCTL);
1791 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1793 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1794 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1795 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1798 * enable stripping of CRC. It's unlikely this will break BMC
1799 * redirection as it did with e1000. Newer features require
1800 * that the HW strips the CRC.
1802 rctl |= E1000_RCTL_SECRC;
1804 rctl &= ~E1000_RCTL_SBP;
1806 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1807 rctl &= ~E1000_RCTL_LPE;
1809 rctl |= E1000_RCTL_LPE;
1810 if (adapter->rx_buffer_len <= IGB_RXBUFFER_2048) {
1811 /* Setup buffer sizes */
1812 rctl &= ~E1000_RCTL_SZ_4096;
1813 rctl |= E1000_RCTL_BSEX;
1814 switch (adapter->rx_buffer_len) {
1815 case IGB_RXBUFFER_256:
1816 rctl |= E1000_RCTL_SZ_256;
1817 rctl &= ~E1000_RCTL_BSEX;
1819 case IGB_RXBUFFER_512:
1820 rctl |= E1000_RCTL_SZ_512;
1821 rctl &= ~E1000_RCTL_BSEX;
1823 case IGB_RXBUFFER_1024:
1824 rctl |= E1000_RCTL_SZ_1024;
1825 rctl &= ~E1000_RCTL_BSEX;
1827 case IGB_RXBUFFER_2048:
1829 rctl |= E1000_RCTL_SZ_2048;
1830 rctl &= ~E1000_RCTL_BSEX;
1834 rctl &= ~E1000_RCTL_BSEX;
1835 srrctl = adapter->rx_buffer_len >> E1000_SRRCTL_BSIZEPKT_SHIFT;
1838 /* 82575 and greater support packet-split where the protocol
1839 * header is placed in skb->data and the packet data is
1840 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
1841 * In the case of a non-split, skb->data is linearly filled,
1842 * followed by the page buffers. Therefore, skb->data is
1843 * sized to hold the largest protocol header.
1845 /* allocations using alloc_page take too long for regular MTU
1846 * so only enable packet split for jumbo frames */
1847 if (rctl & E1000_RCTL_LPE) {
1848 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
1849 srrctl |= adapter->rx_ps_hdr_size <<
1850 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
1851 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1853 adapter->rx_ps_hdr_size = 0;
1854 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1857 for (i = 0; i < adapter->num_rx_queues; i++)
1858 wr32(E1000_SRRCTL(i), srrctl);
1860 wr32(E1000_RCTL, rctl);
1864 * igb_configure_rx - Configure receive Unit after Reset
1865 * @adapter: board private structure
1867 * Configure the Rx unit of the MAC after a reset.
1869 static void igb_configure_rx(struct igb_adapter *adapter)
1872 struct e1000_hw *hw = &adapter->hw;
1877 /* disable receives while setting up the descriptors */
1878 rctl = rd32(E1000_RCTL);
1879 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
1883 if (adapter->itr_setting > 3)
1884 wr32(E1000_ITR, adapter->itr);
1886 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1887 * the Base and Length of the Rx Descriptor Ring */
1888 for (i = 0; i < adapter->num_rx_queues; i++) {
1889 struct igb_ring *ring = &(adapter->rx_ring[i]);
1891 wr32(E1000_RDBAL(i),
1892 rdba & 0x00000000ffffffffULL);
1893 wr32(E1000_RDBAH(i), rdba >> 32);
1894 wr32(E1000_RDLEN(i),
1895 ring->count * sizeof(union e1000_adv_rx_desc));
1897 ring->head = E1000_RDH(i);
1898 ring->tail = E1000_RDT(i);
1899 writel(0, hw->hw_addr + ring->tail);
1900 writel(0, hw->hw_addr + ring->head);
1902 rxdctl = rd32(E1000_RXDCTL(i));
1903 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1904 rxdctl &= 0xFFF00000;
1905 rxdctl |= IGB_RX_PTHRESH;
1906 rxdctl |= IGB_RX_HTHRESH << 8;
1907 rxdctl |= IGB_RX_WTHRESH << 16;
1908 wr32(E1000_RXDCTL(i), rxdctl);
1909 #ifdef CONFIG_IGB_LRO
1910 /* Intitial LRO Settings */
1911 ring->lro_mgr.max_aggr = MAX_LRO_AGGR;
1912 ring->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
1913 ring->lro_mgr.get_skb_header = igb_get_skb_hdr;
1914 ring->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
1915 ring->lro_mgr.dev = adapter->netdev;
1916 ring->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
1917 ring->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
1921 if (adapter->num_rx_queues > 1) {
1930 get_random_bytes(&random[0], 40);
1932 if (hw->mac.type >= e1000_82576)
1936 for (j = 0; j < (32 * 4); j++) {
1938 (j % adapter->num_rx_queues) << shift;
1941 hw->hw_addr + E1000_RETA(0) + (j & ~3));
1943 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
1945 /* Fill out hash function seeds */
1946 for (j = 0; j < 10; j++)
1947 array_wr32(E1000_RSSRK(0), j, random[j]);
1949 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
1950 E1000_MRQC_RSS_FIELD_IPV4_TCP);
1951 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
1952 E1000_MRQC_RSS_FIELD_IPV6_TCP);
1953 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
1954 E1000_MRQC_RSS_FIELD_IPV6_UDP);
1955 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
1956 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
1959 wr32(E1000_MRQC, mrqc);
1961 /* Multiqueue and raw packet checksumming are mutually
1962 * exclusive. Note that this not the same as TCP/IP
1963 * checksumming, which works fine. */
1964 rxcsum = rd32(E1000_RXCSUM);
1965 rxcsum |= E1000_RXCSUM_PCSD;
1966 wr32(E1000_RXCSUM, rxcsum);
1968 /* Enable Receive Checksum Offload for TCP and UDP */
1969 rxcsum = rd32(E1000_RXCSUM);
1970 if (adapter->rx_csum) {
1971 rxcsum |= E1000_RXCSUM_TUOFL;
1973 /* Enable IPv4 payload checksum for UDP fragments
1974 * Must be used in conjunction with packet-split. */
1975 if (adapter->rx_ps_hdr_size)
1976 rxcsum |= E1000_RXCSUM_IPPCSE;
1978 rxcsum &= ~E1000_RXCSUM_TUOFL;
1979 /* don't need to clear IPPCSE as it defaults to 0 */
1981 wr32(E1000_RXCSUM, rxcsum);
1986 adapter->max_frame_size + VLAN_TAG_SIZE);
1988 wr32(E1000_RLPML, adapter->max_frame_size);
1990 /* Enable Receives */
1991 wr32(E1000_RCTL, rctl);
1995 * igb_free_tx_resources - Free Tx Resources per Queue
1996 * @adapter: board private structure
1997 * @tx_ring: Tx descriptor ring for a specific queue
1999 * Free all transmit software resources
2001 static void igb_free_tx_resources(struct igb_ring *tx_ring)
2003 struct pci_dev *pdev = tx_ring->adapter->pdev;
2005 igb_clean_tx_ring(tx_ring);
2007 vfree(tx_ring->buffer_info);
2008 tx_ring->buffer_info = NULL;
2010 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2012 tx_ring->desc = NULL;
2016 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2017 * @adapter: board private structure
2019 * Free all transmit software resources
2021 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2025 for (i = 0; i < adapter->num_tx_queues; i++)
2026 igb_free_tx_resources(&adapter->tx_ring[i]);
2029 static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2030 struct igb_buffer *buffer_info)
2032 if (buffer_info->dma) {
2033 pci_unmap_page(adapter->pdev,
2035 buffer_info->length,
2037 buffer_info->dma = 0;
2039 if (buffer_info->skb) {
2040 dev_kfree_skb_any(buffer_info->skb);
2041 buffer_info->skb = NULL;
2043 buffer_info->time_stamp = 0;
2044 /* buffer_info must be completely set up in the transmit path */
2048 * igb_clean_tx_ring - Free Tx Buffers
2049 * @adapter: board private structure
2050 * @tx_ring: ring to be cleaned
2052 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2054 struct igb_adapter *adapter = tx_ring->adapter;
2055 struct igb_buffer *buffer_info;
2059 if (!tx_ring->buffer_info)
2061 /* Free all the Tx ring sk_buffs */
2063 for (i = 0; i < tx_ring->count; i++) {
2064 buffer_info = &tx_ring->buffer_info[i];
2065 igb_unmap_and_free_tx_resource(adapter, buffer_info);
2068 size = sizeof(struct igb_buffer) * tx_ring->count;
2069 memset(tx_ring->buffer_info, 0, size);
2071 /* Zero out the descriptor ring */
2073 memset(tx_ring->desc, 0, tx_ring->size);
2075 tx_ring->next_to_use = 0;
2076 tx_ring->next_to_clean = 0;
2078 writel(0, adapter->hw.hw_addr + tx_ring->head);
2079 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2083 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2084 * @adapter: board private structure
2086 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2090 for (i = 0; i < adapter->num_tx_queues; i++)
2091 igb_clean_tx_ring(&adapter->tx_ring[i]);
2095 * igb_free_rx_resources - Free Rx Resources
2096 * @adapter: board private structure
2097 * @rx_ring: ring to clean the resources from
2099 * Free all receive software resources
2101 static void igb_free_rx_resources(struct igb_ring *rx_ring)
2103 struct pci_dev *pdev = rx_ring->adapter->pdev;
2105 igb_clean_rx_ring(rx_ring);
2107 vfree(rx_ring->buffer_info);
2108 rx_ring->buffer_info = NULL;
2110 #ifdef CONFIG_IGB_LRO
2111 vfree(rx_ring->lro_mgr.lro_arr);
2112 rx_ring->lro_mgr.lro_arr = NULL;
2115 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2117 rx_ring->desc = NULL;
2121 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2122 * @adapter: board private structure
2124 * Free all receive software resources
2126 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2130 for (i = 0; i < adapter->num_rx_queues; i++)
2131 igb_free_rx_resources(&adapter->rx_ring[i]);
2135 * igb_clean_rx_ring - Free Rx Buffers per Queue
2136 * @adapter: board private structure
2137 * @rx_ring: ring to free buffers from
2139 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2141 struct igb_adapter *adapter = rx_ring->adapter;
2142 struct igb_buffer *buffer_info;
2143 struct pci_dev *pdev = adapter->pdev;
2147 if (!rx_ring->buffer_info)
2149 /* Free all the Rx ring sk_buffs */
2150 for (i = 0; i < rx_ring->count; i++) {
2151 buffer_info = &rx_ring->buffer_info[i];
2152 if (buffer_info->dma) {
2153 if (adapter->rx_ps_hdr_size)
2154 pci_unmap_single(pdev, buffer_info->dma,
2155 adapter->rx_ps_hdr_size,
2156 PCI_DMA_FROMDEVICE);
2158 pci_unmap_single(pdev, buffer_info->dma,
2159 adapter->rx_buffer_len,
2160 PCI_DMA_FROMDEVICE);
2161 buffer_info->dma = 0;
2164 if (buffer_info->skb) {
2165 dev_kfree_skb(buffer_info->skb);
2166 buffer_info->skb = NULL;
2168 if (buffer_info->page) {
2169 if (buffer_info->page_dma)
2170 pci_unmap_page(pdev, buffer_info->page_dma,
2172 PCI_DMA_FROMDEVICE);
2173 put_page(buffer_info->page);
2174 buffer_info->page = NULL;
2175 buffer_info->page_dma = 0;
2176 buffer_info->page_offset = 0;
2180 size = sizeof(struct igb_buffer) * rx_ring->count;
2181 memset(rx_ring->buffer_info, 0, size);
2183 /* Zero out the descriptor ring */
2184 memset(rx_ring->desc, 0, rx_ring->size);
2186 rx_ring->next_to_clean = 0;
2187 rx_ring->next_to_use = 0;
2189 writel(0, adapter->hw.hw_addr + rx_ring->head);
2190 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2194 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2195 * @adapter: board private structure
2197 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2201 for (i = 0; i < adapter->num_rx_queues; i++)
2202 igb_clean_rx_ring(&adapter->rx_ring[i]);
2206 * igb_set_mac - Change the Ethernet Address of the NIC
2207 * @netdev: network interface device structure
2208 * @p: pointer to an address structure
2210 * Returns 0 on success, negative on failure
2212 static int igb_set_mac(struct net_device *netdev, void *p)
2214 struct igb_adapter *adapter = netdev_priv(netdev);
2215 struct sockaddr *addr = p;
2217 if (!is_valid_ether_addr(addr->sa_data))
2218 return -EADDRNOTAVAIL;
2220 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2221 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
2223 adapter->hw.mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
2229 * igb_set_multi - Multicast and Promiscuous mode set
2230 * @netdev: network interface device structure
2232 * The set_multi entry point is called whenever the multicast address
2233 * list or the network interface flags are updated. This routine is
2234 * responsible for configuring the hardware for proper multicast,
2235 * promiscuous mode, and all-multi behavior.
2237 static void igb_set_multi(struct net_device *netdev)
2239 struct igb_adapter *adapter = netdev_priv(netdev);
2240 struct e1000_hw *hw = &adapter->hw;
2241 struct e1000_mac_info *mac = &hw->mac;
2242 struct dev_mc_list *mc_ptr;
2247 /* Check for Promiscuous and All Multicast modes */
2249 rctl = rd32(E1000_RCTL);
2251 if (netdev->flags & IFF_PROMISC) {
2252 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2253 rctl &= ~E1000_RCTL_VFE;
2255 if (netdev->flags & IFF_ALLMULTI) {
2256 rctl |= E1000_RCTL_MPE;
2257 rctl &= ~E1000_RCTL_UPE;
2259 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2260 rctl |= E1000_RCTL_VFE;
2262 wr32(E1000_RCTL, rctl);
2264 if (!netdev->mc_count) {
2265 /* nothing to program, so clear mc list */
2266 igb_update_mc_addr_list_82575(hw, NULL, 0, 1,
2267 mac->rar_entry_count);
2271 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
2275 /* The shared function expects a packed array of only addresses. */
2276 mc_ptr = netdev->mc_list;
2278 for (i = 0; i < netdev->mc_count; i++) {
2281 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2282 mc_ptr = mc_ptr->next;
2284 igb_update_mc_addr_list_82575(hw, mta_list, i, 1,
2285 mac->rar_entry_count);
2289 /* Need to wait a few seconds after link up to get diagnostic information from
2291 static void igb_update_phy_info(unsigned long data)
2293 struct igb_adapter *adapter = (struct igb_adapter *) data;
2294 if (adapter->hw.phy.ops.get_phy_info)
2295 adapter->hw.phy.ops.get_phy_info(&adapter->hw);
2299 * igb_watchdog - Timer Call-back
2300 * @data: pointer to adapter cast into an unsigned long
2302 static void igb_watchdog(unsigned long data)
2304 struct igb_adapter *adapter = (struct igb_adapter *)data;
2305 /* Do the rest outside of interrupt context */
2306 schedule_work(&adapter->watchdog_task);
2309 static void igb_watchdog_task(struct work_struct *work)
2311 struct igb_adapter *adapter = container_of(work,
2312 struct igb_adapter, watchdog_task);
2313 struct e1000_hw *hw = &adapter->hw;
2315 struct net_device *netdev = adapter->netdev;
2316 struct igb_ring *tx_ring = adapter->tx_ring;
2317 struct e1000_mac_info *mac = &adapter->hw.mac;
2322 if ((netif_carrier_ok(netdev)) &&
2323 (rd32(E1000_STATUS) & E1000_STATUS_LU))
2326 ret_val = hw->mac.ops.check_for_link(&adapter->hw);
2327 if ((ret_val == E1000_ERR_PHY) &&
2328 (hw->phy.type == e1000_phy_igp_3) &&
2330 E1000_PHY_CTRL_GBE_DISABLE))
2331 dev_info(&adapter->pdev->dev,
2332 "Gigabit has been disabled, downgrading speed\n");
2334 if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
2335 !(rd32(E1000_TXCW) & E1000_TXCW_ANE))
2336 link = mac->serdes_has_link;
2338 link = rd32(E1000_STATUS) &
2342 if (!netif_carrier_ok(netdev)) {
2344 hw->mac.ops.get_speed_and_duplex(&adapter->hw,
2345 &adapter->link_speed,
2346 &adapter->link_duplex);
2348 ctrl = rd32(E1000_CTRL);
2349 dev_info(&adapter->pdev->dev,
2350 "NIC Link is Up %d Mbps %s, "
2351 "Flow Control: %s\n",
2352 adapter->link_speed,
2353 adapter->link_duplex == FULL_DUPLEX ?
2354 "Full Duplex" : "Half Duplex",
2355 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2356 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2357 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2358 E1000_CTRL_TFCE) ? "TX" : "None")));
2360 /* tweak tx_queue_len according to speed/duplex and
2361 * adjust the timeout factor */
2362 netdev->tx_queue_len = adapter->tx_queue_len;
2363 adapter->tx_timeout_factor = 1;
2364 switch (adapter->link_speed) {
2366 netdev->tx_queue_len = 10;
2367 adapter->tx_timeout_factor = 14;
2370 netdev->tx_queue_len = 100;
2371 /* maybe add some timeout factor ? */
2375 netif_carrier_on(netdev);
2376 netif_wake_queue(netdev);
2377 for (i = 0; i < adapter->num_tx_queues; i++)
2378 netif_wake_subqueue(netdev, i);
2380 if (!test_bit(__IGB_DOWN, &adapter->state))
2381 mod_timer(&adapter->phy_info_timer,
2382 round_jiffies(jiffies + 2 * HZ));
2385 if (netif_carrier_ok(netdev)) {
2386 adapter->link_speed = 0;
2387 adapter->link_duplex = 0;
2388 dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
2389 netif_carrier_off(netdev);
2390 netif_stop_queue(netdev);
2391 for (i = 0; i < adapter->num_tx_queues; i++)
2392 netif_stop_subqueue(netdev, i);
2393 if (!test_bit(__IGB_DOWN, &adapter->state))
2394 mod_timer(&adapter->phy_info_timer,
2395 round_jiffies(jiffies + 2 * HZ));
2400 igb_update_stats(adapter);
2402 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2403 adapter->tpt_old = adapter->stats.tpt;
2404 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
2405 adapter->colc_old = adapter->stats.colc;
2407 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2408 adapter->gorc_old = adapter->stats.gorc;
2409 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2410 adapter->gotc_old = adapter->stats.gotc;
2412 igb_update_adaptive(&adapter->hw);
2414 if (!netif_carrier_ok(netdev)) {
2415 if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
2416 /* We've lost link, so the controller stops DMA,
2417 * but we've got queued Tx work that's never going
2418 * to get done, so reset controller to flush Tx.
2419 * (Do the reset outside of interrupt context). */
2420 adapter->tx_timeout_count++;
2421 schedule_work(&adapter->reset_task);
2425 /* Cause software interrupt to ensure rx ring is cleaned */
2426 wr32(E1000_ICS, E1000_ICS_RXDMT0);
2428 /* Force detection of hung controller every watchdog period */
2429 tx_ring->detect_tx_hung = true;
2431 /* Reset the timer */
2432 if (!test_bit(__IGB_DOWN, &adapter->state))
2433 mod_timer(&adapter->watchdog_timer,
2434 round_jiffies(jiffies + 2 * HZ));
2437 enum latency_range {
2441 latency_invalid = 255
2446 * igb_update_ring_itr - update the dynamic ITR value based on packet size
2448 * Stores a new ITR value based on strictly on packet size. This
2449 * algorithm is less sophisticated than that used in igb_update_itr,
2450 * due to the difficulty of synchronizing statistics across multiple
2451 * receive rings. The divisors and thresholds used by this fuction
2452 * were determined based on theoretical maximum wire speed and testing
2453 * data, in order to minimize response time while increasing bulk
2455 * This functionality is controlled by the InterruptThrottleRate module
2456 * parameter (see igb_param.c)
2457 * NOTE: This function is called only when operating in a multiqueue
2458 * receive environment.
2459 * @rx_ring: pointer to ring
2461 static void igb_update_ring_itr(struct igb_ring *rx_ring)
2463 int new_val = rx_ring->itr_val;
2464 int avg_wire_size = 0;
2465 struct igb_adapter *adapter = rx_ring->adapter;
2467 if (!rx_ring->total_packets)
2468 goto clear_counts; /* no packets, so don't do anything */
2470 /* For non-gigabit speeds, just fix the interrupt rate at 4000
2471 * ints/sec - ITR timer value of 120 ticks.
2473 if (adapter->link_speed != SPEED_1000) {
2477 avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
2479 /* Add 24 bytes to size to account for CRC, preamble, and gap */
2480 avg_wire_size += 24;
2482 /* Don't starve jumbo frames */
2483 avg_wire_size = min(avg_wire_size, 3000);
2485 /* Give a little boost to mid-size frames */
2486 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
2487 new_val = avg_wire_size / 3;
2489 new_val = avg_wire_size / 2;
2492 if (new_val != rx_ring->itr_val) {
2493 rx_ring->itr_val = new_val;
2494 rx_ring->set_itr = 1;
2497 rx_ring->total_bytes = 0;
2498 rx_ring->total_packets = 0;
2502 * igb_update_itr - update the dynamic ITR value based on statistics
2503 * Stores a new ITR value based on packets and byte
2504 * counts during the last interrupt. The advantage of per interrupt
2505 * computation is faster updates and more accurate ITR for the current
2506 * traffic pattern. Constants in this function were computed
2507 * based on theoretical maximum wire speed and thresholds were set based
2508 * on testing data as well as attempting to minimize response time
2509 * while increasing bulk throughput.
2510 * this functionality is controlled by the InterruptThrottleRate module
2511 * parameter (see igb_param.c)
2512 * NOTE: These calculations are only valid when operating in a single-
2513 * queue environment.
2514 * @adapter: pointer to adapter
2515 * @itr_setting: current adapter->itr
2516 * @packets: the number of packets during this measurement interval
2517 * @bytes: the number of bytes during this measurement interval
2519 static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
2520 int packets, int bytes)
2522 unsigned int retval = itr_setting;
2525 goto update_itr_done;
2527 switch (itr_setting) {
2528 case lowest_latency:
2529 /* handle TSO and jumbo frames */
2530 if (bytes/packets > 8000)
2531 retval = bulk_latency;
2532 else if ((packets < 5) && (bytes > 512))
2533 retval = low_latency;
2535 case low_latency: /* 50 usec aka 20000 ints/s */
2536 if (bytes > 10000) {
2537 /* this if handles the TSO accounting */
2538 if (bytes/packets > 8000) {
2539 retval = bulk_latency;
2540 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
2541 retval = bulk_latency;
2542 } else if ((packets > 35)) {
2543 retval = lowest_latency;
2545 } else if (bytes/packets > 2000) {
2546 retval = bulk_latency;
2547 } else if (packets <= 2 && bytes < 512) {
2548 retval = lowest_latency;
2551 case bulk_latency: /* 250 usec aka 4000 ints/s */
2552 if (bytes > 25000) {
2554 retval = low_latency;
2555 } else if (bytes < 6000) {
2556 retval = low_latency;
2565 static void igb_set_itr(struct igb_adapter *adapter)
2568 u32 new_itr = adapter->itr;
2570 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2571 if (adapter->link_speed != SPEED_1000) {
2577 adapter->rx_itr = igb_update_itr(adapter,
2579 adapter->rx_ring->total_packets,
2580 adapter->rx_ring->total_bytes);
2582 if (adapter->rx_ring->buddy) {
2583 adapter->tx_itr = igb_update_itr(adapter,
2585 adapter->tx_ring->total_packets,
2586 adapter->tx_ring->total_bytes);
2588 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2590 current_itr = adapter->rx_itr;
2593 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2594 if (adapter->itr_setting == 3 &&
2595 current_itr == lowest_latency)
2596 current_itr = low_latency;
2598 switch (current_itr) {
2599 /* counts and packets in update_itr are dependent on these numbers */
2600 case lowest_latency:
2604 new_itr = 20000; /* aka hwitr = ~200 */
2614 adapter->rx_ring->total_bytes = 0;
2615 adapter->rx_ring->total_packets = 0;
2616 if (adapter->rx_ring->buddy) {
2617 adapter->rx_ring->buddy->total_bytes = 0;
2618 adapter->rx_ring->buddy->total_packets = 0;
2621 if (new_itr != adapter->itr) {
2622 /* this attempts to bias the interrupt rate towards Bulk
2623 * by adding intermediate steps when interrupt rate is
2625 new_itr = new_itr > adapter->itr ?
2626 min(adapter->itr + (new_itr >> 2), new_itr) :
2628 /* Don't write the value here; it resets the adapter's
2629 * internal timer, and causes us to delay far longer than
2630 * we should between interrupts. Instead, we write the ITR
2631 * value at the beginning of the next interrupt so the timing
2632 * ends up being correct.
2634 adapter->itr = new_itr;
2635 adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256);
2636 adapter->rx_ring->set_itr = 1;
2643 #define IGB_TX_FLAGS_CSUM 0x00000001
2644 #define IGB_TX_FLAGS_VLAN 0x00000002
2645 #define IGB_TX_FLAGS_TSO 0x00000004
2646 #define IGB_TX_FLAGS_IPV4 0x00000008
2647 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
2648 #define IGB_TX_FLAGS_VLAN_SHIFT 16
2650 static inline int igb_tso_adv(struct igb_adapter *adapter,
2651 struct igb_ring *tx_ring,
2652 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2654 struct e1000_adv_tx_context_desc *context_desc;
2657 struct igb_buffer *buffer_info;
2658 u32 info = 0, tu_cmd = 0;
2659 u32 mss_l4len_idx, l4len;
2662 if (skb_header_cloned(skb)) {
2663 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2668 l4len = tcp_hdrlen(skb);
2671 if (skb->protocol == htons(ETH_P_IP)) {
2672 struct iphdr *iph = ip_hdr(skb);
2675 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2679 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
2680 ipv6_hdr(skb)->payload_len = 0;
2681 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2682 &ipv6_hdr(skb)->daddr,
2686 i = tx_ring->next_to_use;
2688 buffer_info = &tx_ring->buffer_info[i];
2689 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
2690 /* VLAN MACLEN IPLEN */
2691 if (tx_flags & IGB_TX_FLAGS_VLAN)
2692 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
2693 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2694 *hdr_len += skb_network_offset(skb);
2695 info |= skb_network_header_len(skb);
2696 *hdr_len += skb_network_header_len(skb);
2697 context_desc->vlan_macip_lens = cpu_to_le32(info);
2699 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2700 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2702 if (skb->protocol == htons(ETH_P_IP))
2703 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2704 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2706 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2709 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
2710 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
2712 /* Context index must be unique per ring. */
2713 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
2714 mss_l4len_idx |= tx_ring->queue_index << 4;
2716 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2717 context_desc->seqnum_seed = 0;
2719 buffer_info->time_stamp = jiffies;
2720 buffer_info->dma = 0;
2722 if (i == tx_ring->count)
2725 tx_ring->next_to_use = i;
2730 static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2731 struct igb_ring *tx_ring,
2732 struct sk_buff *skb, u32 tx_flags)
2734 struct e1000_adv_tx_context_desc *context_desc;
2736 struct igb_buffer *buffer_info;
2737 u32 info = 0, tu_cmd = 0;
2739 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
2740 (tx_flags & IGB_TX_FLAGS_VLAN)) {
2741 i = tx_ring->next_to_use;
2742 buffer_info = &tx_ring->buffer_info[i];
2743 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
2745 if (tx_flags & IGB_TX_FLAGS_VLAN)
2746 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
2747 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2748 if (skb->ip_summed == CHECKSUM_PARTIAL)
2749 info |= skb_network_header_len(skb);
2751 context_desc->vlan_macip_lens = cpu_to_le32(info);
2753 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2755 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2756 switch (skb->protocol) {
2757 case __constant_htons(ETH_P_IP):
2758 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2759 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2760 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2762 case __constant_htons(ETH_P_IPV6):
2763 /* XXX what about other V6 headers?? */
2764 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2765 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2768 if (unlikely(net_ratelimit()))
2769 dev_warn(&adapter->pdev->dev,
2770 "partial checksum but proto=%x!\n",
2776 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2777 context_desc->seqnum_seed = 0;
2778 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
2779 context_desc->mss_l4len_idx =
2780 cpu_to_le32(tx_ring->queue_index << 4);
2782 buffer_info->time_stamp = jiffies;
2783 buffer_info->dma = 0;
2786 if (i == tx_ring->count)
2788 tx_ring->next_to_use = i;
2797 #define IGB_MAX_TXD_PWR 16
2798 #define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
2800 static inline int igb_tx_map_adv(struct igb_adapter *adapter,
2801 struct igb_ring *tx_ring,
2802 struct sk_buff *skb)
2804 struct igb_buffer *buffer_info;
2805 unsigned int len = skb_headlen(skb);
2806 unsigned int count = 0, i;
2809 i = tx_ring->next_to_use;
2811 buffer_info = &tx_ring->buffer_info[i];
2812 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
2813 buffer_info->length = len;
2814 /* set time_stamp *before* dma to help avoid a possible race */
2815 buffer_info->time_stamp = jiffies;
2816 buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
2820 if (i == tx_ring->count)
2823 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2824 struct skb_frag_struct *frag;
2826 frag = &skb_shinfo(skb)->frags[f];
2829 buffer_info = &tx_ring->buffer_info[i];
2830 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
2831 buffer_info->length = len;
2832 buffer_info->time_stamp = jiffies;
2833 buffer_info->dma = pci_map_page(adapter->pdev,
2841 if (i == tx_ring->count)
2845 i = (i == 0) ? tx_ring->count - 1 : i - 1;
2846 tx_ring->buffer_info[i].skb = skb;
2851 static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
2852 struct igb_ring *tx_ring,
2853 int tx_flags, int count, u32 paylen,
2856 union e1000_adv_tx_desc *tx_desc = NULL;
2857 struct igb_buffer *buffer_info;
2858 u32 olinfo_status = 0, cmd_type_len;
2861 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
2862 E1000_ADVTXD_DCMD_DEXT);
2864 if (tx_flags & IGB_TX_FLAGS_VLAN)
2865 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2867 if (tx_flags & IGB_TX_FLAGS_TSO) {
2868 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
2870 /* insert tcp checksum */
2871 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2873 /* insert ip checksum */
2874 if (tx_flags & IGB_TX_FLAGS_IPV4)
2875 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2877 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
2878 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2881 if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
2882 (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
2883 IGB_TX_FLAGS_VLAN)))
2884 olinfo_status |= tx_ring->queue_index << 4;
2886 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
2888 i = tx_ring->next_to_use;
2890 buffer_info = &tx_ring->buffer_info[i];
2891 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
2892 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
2893 tx_desc->read.cmd_type_len =
2894 cpu_to_le32(cmd_type_len | buffer_info->length);
2895 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2897 if (i == tx_ring->count)
2901 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
2902 /* Force memory writes to complete before letting h/w
2903 * know there are new descriptors to fetch. (Only
2904 * applicable for weak-ordered memory model archs,
2905 * such as IA-64). */
2908 tx_ring->next_to_use = i;
2909 writel(i, adapter->hw.hw_addr + tx_ring->tail);
2910 /* we need this if more than one processor can write to our tail
2911 * at a time, it syncronizes IO on IA64/Altix systems */
2915 static int __igb_maybe_stop_tx(struct net_device *netdev,
2916 struct igb_ring *tx_ring, int size)
2918 struct igb_adapter *adapter = netdev_priv(netdev);
2920 netif_stop_subqueue(netdev, tx_ring->queue_index);
2922 /* Herbert's original patch had:
2923 * smp_mb__after_netif_stop_queue();
2924 * but since that doesn't exist yet, just open code it. */
2927 /* We need to check again in a case another CPU has just
2928 * made room available. */
2929 if (IGB_DESC_UNUSED(tx_ring) < size)
2933 netif_wake_subqueue(netdev, tx_ring->queue_index);
2934 ++adapter->restart_queue;
2938 static int igb_maybe_stop_tx(struct net_device *netdev,
2939 struct igb_ring *tx_ring, int size)
2941 if (IGB_DESC_UNUSED(tx_ring) >= size)
2943 return __igb_maybe_stop_tx(netdev, tx_ring, size);
2946 #define TXD_USE_COUNT(S) (((S) >> (IGB_MAX_TXD_PWR)) + 1)
2948 static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
2949 struct net_device *netdev,
2950 struct igb_ring *tx_ring)
2952 struct igb_adapter *adapter = netdev_priv(netdev);
2953 unsigned int tx_flags = 0;
2958 len = skb_headlen(skb);
2960 if (test_bit(__IGB_DOWN, &adapter->state)) {
2961 dev_kfree_skb_any(skb);
2962 return NETDEV_TX_OK;
2965 if (skb->len <= 0) {
2966 dev_kfree_skb_any(skb);
2967 return NETDEV_TX_OK;
2970 /* need: 1 descriptor per page,
2971 * + 2 desc gap to keep tail from touching head,
2972 * + 1 desc for skb->data,
2973 * + 1 desc for context descriptor,
2974 * otherwise try next time */
2975 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
2976 /* this is a hard error */
2977 return NETDEV_TX_BUSY;
2981 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
2982 tx_flags |= IGB_TX_FLAGS_VLAN;
2983 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
2986 if (skb->protocol == htons(ETH_P_IP))
2987 tx_flags |= IGB_TX_FLAGS_IPV4;
2989 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
2993 dev_kfree_skb_any(skb);
2994 return NETDEV_TX_OK;
2998 tx_flags |= IGB_TX_FLAGS_TSO;
2999 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags))
3000 if (skb->ip_summed == CHECKSUM_PARTIAL)
3001 tx_flags |= IGB_TX_FLAGS_CSUM;
3003 igb_tx_queue_adv(adapter, tx_ring, tx_flags,
3004 igb_tx_map_adv(adapter, tx_ring, skb),
3007 netdev->trans_start = jiffies;
3009 /* Make sure there is space in the ring for the next send. */
3010 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
3012 return NETDEV_TX_OK;
3015 static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
3017 struct igb_adapter *adapter = netdev_priv(netdev);
3018 struct igb_ring *tx_ring;
3021 r_idx = skb->queue_mapping & (IGB_MAX_TX_QUEUES - 1);
3022 tx_ring = adapter->multi_tx_table[r_idx];
3024 /* This goes back to the question of how to logically map a tx queue
3025 * to a flow. Right now, performance is impacted slightly negatively
3026 * if using multiple tx queues. If the stack breaks away from a
3027 * single qdisc implementation, we can look at this again. */
3028 return (igb_xmit_frame_ring_adv(skb, netdev, tx_ring));
3032 * igb_tx_timeout - Respond to a Tx Hang
3033 * @netdev: network interface device structure
3035 static void igb_tx_timeout(struct net_device *netdev)
3037 struct igb_adapter *adapter = netdev_priv(netdev);
3038 struct e1000_hw *hw = &adapter->hw;
3040 /* Do the reset outside of interrupt context */
3041 adapter->tx_timeout_count++;
3042 schedule_work(&adapter->reset_task);
3043 wr32(E1000_EICS, adapter->eims_enable_mask &
3044 ~(E1000_EIMS_TCP_TIMER | E1000_EIMS_OTHER));
3047 static void igb_reset_task(struct work_struct *work)
3049 struct igb_adapter *adapter;
3050 adapter = container_of(work, struct igb_adapter, reset_task);
3052 igb_reinit_locked(adapter);
3056 * igb_get_stats - Get System Network Statistics
3057 * @netdev: network interface device structure
3059 * Returns the address of the device statistics structure.
3060 * The statistics are actually updated from the timer callback.
3062 static struct net_device_stats *
3063 igb_get_stats(struct net_device *netdev)
3065 struct igb_adapter *adapter = netdev_priv(netdev);
3067 /* only return the current stats */
3068 return &adapter->net_stats;
3072 * igb_change_mtu - Change the Maximum Transfer Unit
3073 * @netdev: network interface device structure
3074 * @new_mtu: new value for maximum frame size
3076 * Returns 0 on success, negative on failure
3078 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3080 struct igb_adapter *adapter = netdev_priv(netdev);
3081 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3083 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3084 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3085 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3089 #define MAX_STD_JUMBO_FRAME_SIZE 9234
3090 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3091 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
3095 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3097 /* igb_down has a dependency on max_frame_size */
3098 adapter->max_frame_size = max_frame;
3099 if (netif_running(netdev))
3102 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3103 * means we reserve 2 more, this pushes us to allocate from the next
3105 * i.e. RXBUFFER_2048 --> size-4096 slab
3108 if (max_frame <= IGB_RXBUFFER_256)
3109 adapter->rx_buffer_len = IGB_RXBUFFER_256;
3110 else if (max_frame <= IGB_RXBUFFER_512)
3111 adapter->rx_buffer_len = IGB_RXBUFFER_512;
3112 else if (max_frame <= IGB_RXBUFFER_1024)
3113 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3114 else if (max_frame <= IGB_RXBUFFER_2048)
3115 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
3117 #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
3118 adapter->rx_buffer_len = IGB_RXBUFFER_16384;
3120 adapter->rx_buffer_len = PAGE_SIZE / 2;
3122 /* adjust allocation if LPE protects us, and we aren't using SBP */
3123 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
3124 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
3125 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3127 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
3128 netdev->mtu, new_mtu);
3129 netdev->mtu = new_mtu;
3131 if (netif_running(netdev))
3136 clear_bit(__IGB_RESETTING, &adapter->state);
3142 * igb_update_stats - Update the board statistics counters
3143 * @adapter: board private structure
3146 void igb_update_stats(struct igb_adapter *adapter)
3148 struct e1000_hw *hw = &adapter->hw;
3149 struct pci_dev *pdev = adapter->pdev;
3152 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3155 * Prevent stats update while adapter is being reset, or if the pci
3156 * connection is down.
3158 if (adapter->link_speed == 0)
3160 if (pci_channel_offline(pdev))
3163 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3164 adapter->stats.gprc += rd32(E1000_GPRC);
3165 adapter->stats.gorc += rd32(E1000_GORCL);
3166 rd32(E1000_GORCH); /* clear GORCL */
3167 adapter->stats.bprc += rd32(E1000_BPRC);
3168 adapter->stats.mprc += rd32(E1000_MPRC);
3169 adapter->stats.roc += rd32(E1000_ROC);
3171 adapter->stats.prc64 += rd32(E1000_PRC64);
3172 adapter->stats.prc127 += rd32(E1000_PRC127);
3173 adapter->stats.prc255 += rd32(E1000_PRC255);
3174 adapter->stats.prc511 += rd32(E1000_PRC511);
3175 adapter->stats.prc1023 += rd32(E1000_PRC1023);
3176 adapter->stats.prc1522 += rd32(E1000_PRC1522);
3177 adapter->stats.symerrs += rd32(E1000_SYMERRS);
3178 adapter->stats.sec += rd32(E1000_SEC);
3180 adapter->stats.mpc += rd32(E1000_MPC);
3181 adapter->stats.scc += rd32(E1000_SCC);
3182 adapter->stats.ecol += rd32(E1000_ECOL);
3183 adapter->stats.mcc += rd32(E1000_MCC);
3184 adapter->stats.latecol += rd32(E1000_LATECOL);
3185 adapter->stats.dc += rd32(E1000_DC);
3186 adapter->stats.rlec += rd32(E1000_RLEC);
3187 adapter->stats.xonrxc += rd32(E1000_XONRXC);
3188 adapter->stats.xontxc += rd32(E1000_XONTXC);
3189 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
3190 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
3191 adapter->stats.fcruc += rd32(E1000_FCRUC);
3192 adapter->stats.gptc += rd32(E1000_GPTC);
3193 adapter->stats.gotc += rd32(E1000_GOTCL);
3194 rd32(E1000_GOTCH); /* clear GOTCL */
3195 adapter->stats.rnbc += rd32(E1000_RNBC);
3196 adapter->stats.ruc += rd32(E1000_RUC);
3197 adapter->stats.rfc += rd32(E1000_RFC);
3198 adapter->stats.rjc += rd32(E1000_RJC);
3199 adapter->stats.tor += rd32(E1000_TORH);
3200 adapter->stats.tot += rd32(E1000_TOTH);
3201 adapter->stats.tpr += rd32(E1000_TPR);
3203 adapter->stats.ptc64 += rd32(E1000_PTC64);
3204 adapter->stats.ptc127 += rd32(E1000_PTC127);
3205 adapter->stats.ptc255 += rd32(E1000_PTC255);
3206 adapter->stats.ptc511 += rd32(E1000_PTC511);
3207 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
3208 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
3210 adapter->stats.mptc += rd32(E1000_MPTC);
3211 adapter->stats.bptc += rd32(E1000_BPTC);
3213 /* used for adaptive IFS */
3215 hw->mac.tx_packet_delta = rd32(E1000_TPT);
3216 adapter->stats.tpt += hw->mac.tx_packet_delta;
3217 hw->mac.collision_delta = rd32(E1000_COLC);
3218 adapter->stats.colc += hw->mac.collision_delta;
3220 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
3221 adapter->stats.rxerrc += rd32(E1000_RXERRC);
3222 adapter->stats.tncrs += rd32(E1000_TNCRS);
3223 adapter->stats.tsctc += rd32(E1000_TSCTC);
3224 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
3226 adapter->stats.iac += rd32(E1000_IAC);
3227 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
3228 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
3229 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
3230 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
3231 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
3232 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
3233 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
3234 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3236 /* Fill out the OS statistics structure */
3237 adapter->net_stats.multicast = adapter->stats.mprc;
3238 adapter->net_stats.collisions = adapter->stats.colc;
3242 /* RLEC on some newer hardware can be incorrect so build
3243 * our own version based on RUC and ROC */
3244 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3245 adapter->stats.crcerrs + adapter->stats.algnerrc +
3246 adapter->stats.ruc + adapter->stats.roc +
3247 adapter->stats.cexterr;
3248 adapter->net_stats.rx_length_errors = adapter->stats.ruc +
3250 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3251 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3252 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3255 adapter->net_stats.tx_errors = adapter->stats.ecol +
3256 adapter->stats.latecol;
3257 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
3258 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
3259 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
3261 /* Tx Dropped needs to be maintained elsewhere */
3264 if (hw->phy.media_type == e1000_media_type_copper) {
3265 if ((adapter->link_speed == SPEED_1000) &&
3266 (!hw->phy.ops.read_phy_reg(hw, PHY_1000T_STATUS,
3268 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3269 adapter->phy_stats.idle_errors += phy_tmp;
3273 /* Management Stats */
3274 adapter->stats.mgptc += rd32(E1000_MGTPTC);
3275 adapter->stats.mgprc += rd32(E1000_MGTPRC);
3276 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
3280 static irqreturn_t igb_msix_other(int irq, void *data)
3282 struct net_device *netdev = data;
3283 struct igb_adapter *adapter = netdev_priv(netdev);
3284 struct e1000_hw *hw = &adapter->hw;
3285 u32 icr = rd32(E1000_ICR);
3287 /* reading ICR causes bit 31 of EICR to be cleared */
3288 if (!(icr & E1000_ICR_LSC))
3289 goto no_link_interrupt;
3290 hw->mac.get_link_status = 1;
3291 /* guard against interrupt when we're going down */
3292 if (!test_bit(__IGB_DOWN, &adapter->state))
3293 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3296 wr32(E1000_IMS, E1000_IMS_LSC);
3297 wr32(E1000_EIMS, adapter->eims_other);
3302 static irqreturn_t igb_msix_tx(int irq, void *data)
3304 struct igb_ring *tx_ring = data;
3305 struct igb_adapter *adapter = tx_ring->adapter;
3306 struct e1000_hw *hw = &adapter->hw;
3309 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3310 igb_update_tx_dca(tx_ring);
3312 tx_ring->total_bytes = 0;
3313 tx_ring->total_packets = 0;
3315 /* auto mask will automatically reenable the interrupt when we write
3317 if (!igb_clean_tx_irq(tx_ring))
3318 /* Ring was not completely cleaned, so fire another interrupt */
3319 wr32(E1000_EICS, tx_ring->eims_value);
3321 wr32(E1000_EIMS, tx_ring->eims_value);
3326 static void igb_write_itr(struct igb_ring *ring)
3328 struct e1000_hw *hw = &ring->adapter->hw;
3329 if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
3330 switch (hw->mac.type) {
3332 wr32(ring->itr_register,
3337 wr32(ring->itr_register,
3339 (ring->itr_val << 16));
3346 static irqreturn_t igb_msix_rx(int irq, void *data)
3348 struct igb_ring *rx_ring = data;
3349 struct igb_adapter *adapter = rx_ring->adapter;
3351 /* Write the ITR value calculated at the end of the
3352 * previous interrupt.
3355 igb_write_itr(rx_ring);
3357 if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi))
3358 __netif_rx_schedule(adapter->netdev, &rx_ring->napi);
3361 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3362 igb_update_rx_dca(rx_ring);
3368 static void igb_update_rx_dca(struct igb_ring *rx_ring)
3371 struct igb_adapter *adapter = rx_ring->adapter;
3372 struct e1000_hw *hw = &adapter->hw;
3373 int cpu = get_cpu();
3374 int q = rx_ring - adapter->rx_ring;
3376 if (rx_ring->cpu != cpu) {
3377 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
3378 if (hw->mac.type == e1000_82576) {
3379 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
3380 dca_rxctrl |= dca_get_tag(cpu) <<
3381 E1000_DCA_RXCTRL_CPUID_SHIFT;
3383 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
3384 dca_rxctrl |= dca_get_tag(cpu);
3386 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
3387 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
3388 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
3389 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
3395 static void igb_update_tx_dca(struct igb_ring *tx_ring)
3398 struct igb_adapter *adapter = tx_ring->adapter;
3399 struct e1000_hw *hw = &adapter->hw;
3400 int cpu = get_cpu();
3401 int q = tx_ring - adapter->tx_ring;
3403 if (tx_ring->cpu != cpu) {
3404 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
3405 if (hw->mac.type == e1000_82576) {
3406 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
3407 dca_txctrl |= dca_get_tag(cpu) <<
3408 E1000_DCA_TXCTRL_CPUID_SHIFT;
3410 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
3411 dca_txctrl |= dca_get_tag(cpu);
3413 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
3414 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
3420 static void igb_setup_dca(struct igb_adapter *adapter)
3424 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
3427 for (i = 0; i < adapter->num_tx_queues; i++) {
3428 adapter->tx_ring[i].cpu = -1;
3429 igb_update_tx_dca(&adapter->tx_ring[i]);
3431 for (i = 0; i < adapter->num_rx_queues; i++) {
3432 adapter->rx_ring[i].cpu = -1;
3433 igb_update_rx_dca(&adapter->rx_ring[i]);
3437 static int __igb_notify_dca(struct device *dev, void *data)
3439 struct net_device *netdev = dev_get_drvdata(dev);
3440 struct igb_adapter *adapter = netdev_priv(netdev);
3441 struct e1000_hw *hw = &adapter->hw;
3442 unsigned long event = *(unsigned long *)data;
3444 if (!(adapter->flags & IGB_FLAG_HAS_DCA))
3448 case DCA_PROVIDER_ADD:
3449 /* if already enabled, don't do it again */
3450 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3452 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3453 /* Always use CB2 mode, difference is masked
3454 * in the CB driver. */
3455 wr32(E1000_DCA_CTRL, 2);
3456 if (dca_add_requester(dev) == 0) {
3457 dev_info(&adapter->pdev->dev, "DCA enabled\n");
3458 igb_setup_dca(adapter);
3461 /* Fall Through since DCA is disabled. */
3462 case DCA_PROVIDER_REMOVE:
3463 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3464 /* without this a class_device is left
3465 * hanging around in the sysfs model */
3466 dca_remove_requester(dev);
3467 dev_info(&adapter->pdev->dev, "DCA disabled\n");
3468 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3469 wr32(E1000_DCA_CTRL, 1);
3477 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
3482 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
3485 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
3487 #endif /* CONFIG_DCA */
3490 * igb_intr_msi - Interrupt Handler
3491 * @irq: interrupt number
3492 * @data: pointer to a network interface device structure
3494 static irqreturn_t igb_intr_msi(int irq, void *data)
3496 struct net_device *netdev = data;
3497 struct igb_adapter *adapter = netdev_priv(netdev);
3498 struct e1000_hw *hw = &adapter->hw;
3499 /* read ICR disables interrupts using IAM */
3500 u32 icr = rd32(E1000_ICR);
3502 igb_write_itr(adapter->rx_ring);
3504 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3505 hw->mac.get_link_status = 1;
3506 if (!test_bit(__IGB_DOWN, &adapter->state))
3507 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3510 netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
3516 * igb_intr - Interrupt Handler
3517 * @irq: interrupt number
3518 * @data: pointer to a network interface device structure
3520 static irqreturn_t igb_intr(int irq, void *data)
3522 struct net_device *netdev = data;
3523 struct igb_adapter *adapter = netdev_priv(netdev);
3524 struct e1000_hw *hw = &adapter->hw;
3525 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
3526 * need for the IMC write */
3527 u32 icr = rd32(E1000_ICR);
3530 return IRQ_NONE; /* Not our interrupt */
3532 igb_write_itr(adapter->rx_ring);
3534 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
3535 * not set, then the adapter didn't send an interrupt */
3536 if (!(icr & E1000_ICR_INT_ASSERTED))
3539 eicr = rd32(E1000_EICR);
3541 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3542 hw->mac.get_link_status = 1;
3543 /* guard against interrupt when we're going down */
3544 if (!test_bit(__IGB_DOWN, &adapter->state))
3545 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3548 netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
3554 * igb_poll - NAPI Rx polling callback
3555 * @napi: napi polling structure
3556 * @budget: count of how many packets we should handle
3558 static int igb_poll(struct napi_struct *napi, int budget)
3560 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
3561 struct igb_adapter *adapter = rx_ring->adapter;
3562 struct net_device *netdev = adapter->netdev;
3563 int tx_clean_complete, work_done = 0;
3565 /* this poll routine only supports one tx and one rx queue */
3567 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3568 igb_update_tx_dca(&adapter->tx_ring[0]);
3570 tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]);
3573 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3574 igb_update_rx_dca(&adapter->rx_ring[0]);
3576 igb_clean_rx_irq_adv(&adapter->rx_ring[0], &work_done, budget);
3578 /* If no Tx and not enough Rx work done, exit the polling mode */
3579 if ((tx_clean_complete && (work_done < budget)) ||
3580 !netif_running(netdev)) {
3581 if (adapter->itr_setting & 3)
3582 igb_set_itr(adapter);
3583 netif_rx_complete(netdev, napi);
3584 if (!test_bit(__IGB_DOWN, &adapter->state))
3585 igb_irq_enable(adapter);
3592 static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
3594 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
3595 struct igb_adapter *adapter = rx_ring->adapter;
3596 struct e1000_hw *hw = &adapter->hw;
3597 struct net_device *netdev = adapter->netdev;
3600 /* Keep link state information with original netdev */
3601 if (!netif_carrier_ok(netdev))
3605 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3606 igb_update_rx_dca(rx_ring);
3608 igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
3611 /* If not enough Rx work done, exit the polling mode */
3612 if ((work_done == 0) || !netif_running(netdev)) {
3614 netif_rx_complete(netdev, napi);
3616 if (adapter->itr_setting & 3) {
3617 if (adapter->num_rx_queues == 1)
3618 igb_set_itr(adapter);
3620 igb_update_ring_itr(rx_ring);
3623 if (!test_bit(__IGB_DOWN, &adapter->state))
3624 wr32(E1000_EIMS, rx_ring->eims_value);
3632 static inline u32 get_head(struct igb_ring *tx_ring)
3634 void *end = (struct e1000_tx_desc *)tx_ring->desc + tx_ring->count;
3635 return le32_to_cpu(*(volatile __le32 *)end);
3639 * igb_clean_tx_irq - Reclaim resources after transmit completes
3640 * @adapter: board private structure
3641 * returns true if ring is completely cleaned
3643 static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
3645 struct igb_adapter *adapter = tx_ring->adapter;
3646 struct e1000_hw *hw = &adapter->hw;
3647 struct net_device *netdev = adapter->netdev;
3648 struct e1000_tx_desc *tx_desc;
3649 struct igb_buffer *buffer_info;
3650 struct sk_buff *skb;
3653 unsigned int count = 0;
3654 bool cleaned = false;
3656 unsigned int total_bytes = 0, total_packets = 0;
3659 head = get_head(tx_ring);
3660 i = tx_ring->next_to_clean;
3664 tx_desc = E1000_TX_DESC(*tx_ring, i);
3665 buffer_info = &tx_ring->buffer_info[i];
3666 skb = buffer_info->skb;
3669 unsigned int segs, bytecount;
3670 /* gso_segs is currently only valid for tcp */
3671 segs = skb_shinfo(skb)->gso_segs ?: 1;
3672 /* multiply data chunks by size of headers */
3673 bytecount = ((segs - 1) * skb_headlen(skb)) +
3675 total_packets += segs;
3676 total_bytes += bytecount;
3679 igb_unmap_and_free_tx_resource(adapter, buffer_info);
3680 tx_desc->upper.data = 0;
3683 if (i == tx_ring->count)
3687 if (count == IGB_MAX_TX_CLEAN) {
3694 head = get_head(tx_ring);
3695 if (head == oldhead)
3700 tx_ring->next_to_clean = i;
3702 if (unlikely(cleaned &&
3703 netif_carrier_ok(netdev) &&
3704 IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
3705 /* Make sure that anybody stopping the queue after this
3706 * sees the new next_to_clean.
3709 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
3710 !(test_bit(__IGB_DOWN, &adapter->state))) {
3711 netif_wake_subqueue(netdev, tx_ring->queue_index);
3712 ++adapter->restart_queue;
3716 if (tx_ring->detect_tx_hung) {
3717 /* Detect a transmit hang in hardware, this serializes the
3718 * check with the clearing of time_stamp and movement of i */
3719 tx_ring->detect_tx_hung = false;
3720 if (tx_ring->buffer_info[i].time_stamp &&
3721 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
3722 (adapter->tx_timeout_factor * HZ))
3723 && !(rd32(E1000_STATUS) &
3724 E1000_STATUS_TXOFF)) {
3726 tx_desc = E1000_TX_DESC(*tx_ring, i);
3727 /* detected Tx unit hang */
3728 dev_err(&adapter->pdev->dev,
3729 "Detected Tx Unit Hang\n"
3733 " next_to_use <%x>\n"
3734 " next_to_clean <%x>\n"
3736 "buffer_info[next_to_clean]\n"
3737 " time_stamp <%lx>\n"
3739 " desc.status <%x>\n",
3740 tx_ring->queue_index,
3741 readl(adapter->hw.hw_addr + tx_ring->head),
3742 readl(adapter->hw.hw_addr + tx_ring->tail),
3743 tx_ring->next_to_use,
3744 tx_ring->next_to_clean,
3746 tx_ring->buffer_info[i].time_stamp,
3748 tx_desc->upper.fields.status);
3749 netif_stop_subqueue(netdev, tx_ring->queue_index);
3752 tx_ring->total_bytes += total_bytes;
3753 tx_ring->total_packets += total_packets;
3754 tx_ring->tx_stats.bytes += total_bytes;
3755 tx_ring->tx_stats.packets += total_packets;
3756 adapter->net_stats.tx_bytes += total_bytes;
3757 adapter->net_stats.tx_packets += total_packets;
3761 #ifdef CONFIG_IGB_LRO
3763 * igb_get_skb_hdr - helper function for LRO header processing
3764 * @skb: pointer to sk_buff to be added to LRO packet
3765 * @iphdr: pointer to ip header structure
3766 * @tcph: pointer to tcp header structure
3767 * @hdr_flags: pointer to header flags
3768 * @priv: pointer to the receive descriptor for the current sk_buff
3770 static int igb_get_skb_hdr(struct sk_buff *skb, void **iphdr, void **tcph,
3771 u64 *hdr_flags, void *priv)
3773 union e1000_adv_rx_desc *rx_desc = priv;
3774 u16 pkt_type = rx_desc->wb.lower.lo_dword.pkt_info &
3775 (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP);
3777 /* Verify that this is a valid IPv4 TCP packet */
3778 if (pkt_type != (E1000_RXDADV_PKTTYPE_IPV4 |
3779 E1000_RXDADV_PKTTYPE_TCP))
3782 /* Set network headers */
3783 skb_reset_network_header(skb);
3784 skb_set_transport_header(skb, ip_hdrlen(skb));
3785 *iphdr = ip_hdr(skb);
3786 *tcph = tcp_hdr(skb);
3787 *hdr_flags = LRO_IPV4 | LRO_TCP;
3792 #endif /* CONFIG_IGB_LRO */
3795 * igb_receive_skb - helper function to handle rx indications
3796 * @ring: pointer to receive ring receving this packet
3797 * @status: descriptor status field as written by hardware
3798 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3799 * @skb: pointer to sk_buff to be indicated to stack
3801 static void igb_receive_skb(struct igb_ring *ring, u8 status,
3802 union e1000_adv_rx_desc * rx_desc,
3803 struct sk_buff *skb)
3805 struct igb_adapter * adapter = ring->adapter;
3806 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
3808 #ifdef CONFIG_IGB_LRO
3809 if (adapter->netdev->features & NETIF_F_LRO &&
3810 skb->ip_summed == CHECKSUM_UNNECESSARY) {
3812 lro_vlan_hwaccel_receive_skb(&ring->lro_mgr, skb,
3814 le16_to_cpu(rx_desc->wb.upper.vlan),
3817 lro_receive_skb(&ring->lro_mgr,skb, rx_desc);
3822 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
3823 le16_to_cpu(rx_desc->wb.upper.vlan));
3826 netif_receive_skb(skb);
3827 #ifdef CONFIG_IGB_LRO
3833 static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
3834 u32 status_err, struct sk_buff *skb)
3836 skb->ip_summed = CHECKSUM_NONE;
3838 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
3839 if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
3841 /* TCP/UDP checksum error bit is set */
3843 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
3844 /* let the stack verify checksum errors */
3845 adapter->hw_csum_err++;
3848 /* It must be a TCP or UDP packet with a valid checksum */
3849 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
3850 skb->ip_summed = CHECKSUM_UNNECESSARY;
3852 adapter->hw_csum_good++;
3855 static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
3856 int *work_done, int budget)
3858 struct igb_adapter *adapter = rx_ring->adapter;
3859 struct net_device *netdev = adapter->netdev;
3860 struct pci_dev *pdev = adapter->pdev;
3861 union e1000_adv_rx_desc *rx_desc , *next_rxd;
3862 struct igb_buffer *buffer_info , *next_buffer;
3863 struct sk_buff *skb;
3865 u32 length, hlen, staterr;
3866 bool cleaned = false;
3867 int cleaned_count = 0;
3868 unsigned int total_bytes = 0, total_packets = 0;
3870 i = rx_ring->next_to_clean;
3871 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
3872 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3874 while (staterr & E1000_RXD_STAT_DD) {
3875 if (*work_done >= budget)
3878 buffer_info = &rx_ring->buffer_info[i];
3880 /* HW will not DMA in data larger than the given buffer, even
3881 * if it parses the (NFS, of course) header to be larger. In
3882 * that case, it fills the header buffer and spills the rest
3885 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
3886 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
3887 if (hlen > adapter->rx_ps_hdr_size)
3888 hlen = adapter->rx_ps_hdr_size;
3890 length = le16_to_cpu(rx_desc->wb.upper.length);
3894 skb = buffer_info->skb;
3895 prefetch(skb->data - NET_IP_ALIGN);
3896 buffer_info->skb = NULL;
3897 if (!adapter->rx_ps_hdr_size) {
3898 pci_unmap_single(pdev, buffer_info->dma,
3899 adapter->rx_buffer_len +
3901 PCI_DMA_FROMDEVICE);
3902 skb_put(skb, length);
3906 if (!skb_shinfo(skb)->nr_frags) {
3907 pci_unmap_single(pdev, buffer_info->dma,
3908 adapter->rx_ps_hdr_size +
3910 PCI_DMA_FROMDEVICE);
3915 pci_unmap_page(pdev, buffer_info->page_dma,
3916 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
3917 buffer_info->page_dma = 0;
3919 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
3921 buffer_info->page_offset,
3924 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
3925 (page_count(buffer_info->page) != 1))
3926 buffer_info->page = NULL;
3928 get_page(buffer_info->page);
3931 skb->data_len += length;
3933 skb->truesize += length;
3937 if (i == rx_ring->count)
3939 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
3941 next_buffer = &rx_ring->buffer_info[i];
3943 if (!(staterr & E1000_RXD_STAT_EOP)) {
3944 buffer_info->skb = xchg(&next_buffer->skb, skb);
3945 buffer_info->dma = xchg(&next_buffer->dma, 0);
3949 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
3950 dev_kfree_skb_irq(skb);
3954 total_bytes += skb->len;
3957 igb_rx_checksum_adv(adapter, staterr, skb);
3959 skb->protocol = eth_type_trans(skb, netdev);
3961 igb_receive_skb(rx_ring, staterr, rx_desc, skb);
3963 netdev->last_rx = jiffies;
3966 rx_desc->wb.upper.status_error = 0;
3968 /* return some buffers to hardware, one at a time is too slow */
3969 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
3970 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
3974 /* use prefetched values */
3976 buffer_info = next_buffer;
3978 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
3981 rx_ring->next_to_clean = i;
3982 cleaned_count = IGB_DESC_UNUSED(rx_ring);
3984 #ifdef CONFIG_IGB_LRO
3985 if (rx_ring->lro_used) {
3986 lro_flush_all(&rx_ring->lro_mgr);
3987 rx_ring->lro_used = 0;
3992 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
3994 rx_ring->total_packets += total_packets;
3995 rx_ring->total_bytes += total_bytes;
3996 rx_ring->rx_stats.packets += total_packets;
3997 rx_ring->rx_stats.bytes += total_bytes;
3998 adapter->net_stats.rx_bytes += total_bytes;
3999 adapter->net_stats.rx_packets += total_packets;
4005 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
4006 * @adapter: address of board private structure
4008 static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4011 struct igb_adapter *adapter = rx_ring->adapter;
4012 struct net_device *netdev = adapter->netdev;
4013 struct pci_dev *pdev = adapter->pdev;
4014 union e1000_adv_rx_desc *rx_desc;
4015 struct igb_buffer *buffer_info;
4016 struct sk_buff *skb;
4019 i = rx_ring->next_to_use;
4020 buffer_info = &rx_ring->buffer_info[i];
4022 while (cleaned_count--) {
4023 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4025 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
4026 if (!buffer_info->page) {
4027 buffer_info->page = alloc_page(GFP_ATOMIC);
4028 if (!buffer_info->page) {
4029 adapter->alloc_rx_buff_failed++;
4032 buffer_info->page_offset = 0;
4034 buffer_info->page_offset ^= PAGE_SIZE / 2;
4036 buffer_info->page_dma =
4039 buffer_info->page_offset,
4041 PCI_DMA_FROMDEVICE);
4044 if (!buffer_info->skb) {
4047 if (adapter->rx_ps_hdr_size)
4048 bufsz = adapter->rx_ps_hdr_size;
4050 bufsz = adapter->rx_buffer_len;
4051 bufsz += NET_IP_ALIGN;
4052 skb = netdev_alloc_skb(netdev, bufsz);
4055 adapter->alloc_rx_buff_failed++;
4059 /* Make buffer alignment 2 beyond a 16 byte boundary
4060 * this will result in a 16 byte aligned IP header after
4061 * the 14 byte MAC header is removed
4063 skb_reserve(skb, NET_IP_ALIGN);
4065 buffer_info->skb = skb;
4066 buffer_info->dma = pci_map_single(pdev, skb->data,
4068 PCI_DMA_FROMDEVICE);
4071 /* Refresh the desc even if buffer_addrs didn't change because
4072 * each write-back erases this info. */
4073 if (adapter->rx_ps_hdr_size) {
4074 rx_desc->read.pkt_addr =
4075 cpu_to_le64(buffer_info->page_dma);
4076 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
4078 rx_desc->read.pkt_addr =
4079 cpu_to_le64(buffer_info->dma);
4080 rx_desc->read.hdr_addr = 0;
4084 if (i == rx_ring->count)
4086 buffer_info = &rx_ring->buffer_info[i];
4090 if (rx_ring->next_to_use != i) {
4091 rx_ring->next_to_use = i;
4093 i = (rx_ring->count - 1);
4097 /* Force memory writes to complete before letting h/w
4098 * know there are new descriptors to fetch. (Only
4099 * applicable for weak-ordered memory model archs,
4100 * such as IA-64). */
4102 writel(i, adapter->hw.hw_addr + rx_ring->tail);
4112 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4114 struct igb_adapter *adapter = netdev_priv(netdev);
4115 struct mii_ioctl_data *data = if_mii(ifr);
4117 if (adapter->hw.phy.media_type != e1000_media_type_copper)
4122 data->phy_id = adapter->hw.phy.addr;
4125 if (!capable(CAP_NET_ADMIN))
4127 if (adapter->hw.phy.ops.read_phy_reg(&adapter->hw,
4129 & 0x1F, &data->val_out))
4145 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4151 return igb_mii_ioctl(netdev, ifr, cmd);
4157 static void igb_vlan_rx_register(struct net_device *netdev,
4158 struct vlan_group *grp)
4160 struct igb_adapter *adapter = netdev_priv(netdev);
4161 struct e1000_hw *hw = &adapter->hw;
4164 igb_irq_disable(adapter);
4165 adapter->vlgrp = grp;
4168 /* enable VLAN tag insert/strip */
4169 ctrl = rd32(E1000_CTRL);
4170 ctrl |= E1000_CTRL_VME;
4171 wr32(E1000_CTRL, ctrl);
4173 /* enable VLAN receive filtering */
4174 rctl = rd32(E1000_RCTL);
4175 rctl &= ~E1000_RCTL_CFIEN;
4176 wr32(E1000_RCTL, rctl);
4177 igb_update_mng_vlan(adapter);
4179 adapter->max_frame_size + VLAN_TAG_SIZE);
4181 /* disable VLAN tag insert/strip */
4182 ctrl = rd32(E1000_CTRL);
4183 ctrl &= ~E1000_CTRL_VME;
4184 wr32(E1000_CTRL, ctrl);
4186 if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
4187 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4188 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
4191 adapter->max_frame_size);
4194 if (!test_bit(__IGB_DOWN, &adapter->state))
4195 igb_irq_enable(adapter);
4198 static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4200 struct igb_adapter *adapter = netdev_priv(netdev);
4201 struct e1000_hw *hw = &adapter->hw;
4204 if ((adapter->hw.mng_cookie.status &
4205 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
4206 (vid == adapter->mng_vlan_id))
4208 /* add VID to filter table */
4209 index = (vid >> 5) & 0x7F;
4210 vfta = array_rd32(E1000_VFTA, index);
4211 vfta |= (1 << (vid & 0x1F));
4212 igb_write_vfta(&adapter->hw, index, vfta);
4215 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4217 struct igb_adapter *adapter = netdev_priv(netdev);
4218 struct e1000_hw *hw = &adapter->hw;
4221 igb_irq_disable(adapter);
4222 vlan_group_set_device(adapter->vlgrp, vid, NULL);
4224 if (!test_bit(__IGB_DOWN, &adapter->state))
4225 igb_irq_enable(adapter);
4227 if ((adapter->hw.mng_cookie.status &
4228 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
4229 (vid == adapter->mng_vlan_id)) {
4230 /* release control to f/w */
4231 igb_release_hw_control(adapter);
4235 /* remove VID from filter table */
4236 index = (vid >> 5) & 0x7F;
4237 vfta = array_rd32(E1000_VFTA, index);
4238 vfta &= ~(1 << (vid & 0x1F));
4239 igb_write_vfta(&adapter->hw, index, vfta);
4242 static void igb_restore_vlan(struct igb_adapter *adapter)
4244 igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
4246 if (adapter->vlgrp) {
4248 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
4249 if (!vlan_group_get_device(adapter->vlgrp, vid))
4251 igb_vlan_rx_add_vid(adapter->netdev, vid);
4256 int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
4258 struct e1000_mac_info *mac = &adapter->hw.mac;
4262 /* Fiber NICs only allow 1000 gbps Full duplex */
4263 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
4264 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
4265 dev_err(&adapter->pdev->dev,
4266 "Unsupported Speed/Duplex configuration\n");
4271 case SPEED_10 + DUPLEX_HALF:
4272 mac->forced_speed_duplex = ADVERTISE_10_HALF;
4274 case SPEED_10 + DUPLEX_FULL:
4275 mac->forced_speed_duplex = ADVERTISE_10_FULL;
4277 case SPEED_100 + DUPLEX_HALF:
4278 mac->forced_speed_duplex = ADVERTISE_100_HALF;
4280 case SPEED_100 + DUPLEX_FULL:
4281 mac->forced_speed_duplex = ADVERTISE_100_FULL;
4283 case SPEED_1000 + DUPLEX_FULL:
4285 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
4287 case SPEED_1000 + DUPLEX_HALF: /* not supported */
4289 dev_err(&adapter->pdev->dev,
4290 "Unsupported Speed/Duplex configuration\n");
4297 static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
4299 struct net_device *netdev = pci_get_drvdata(pdev);
4300 struct igb_adapter *adapter = netdev_priv(netdev);
4301 struct e1000_hw *hw = &adapter->hw;
4302 u32 ctrl, rctl, status;
4303 u32 wufc = adapter->wol;
4308 netif_device_detach(netdev);
4310 if (netif_running(netdev))
4313 igb_reset_interrupt_capability(adapter);
4315 igb_free_queues(adapter);
4318 retval = pci_save_state(pdev);
4323 status = rd32(E1000_STATUS);
4324 if (status & E1000_STATUS_LU)
4325 wufc &= ~E1000_WUFC_LNKC;
4328 igb_setup_rctl(adapter);
4329 igb_set_multi(netdev);
4331 /* turn on all-multi mode if wake on multicast is enabled */
4332 if (wufc & E1000_WUFC_MC) {
4333 rctl = rd32(E1000_RCTL);
4334 rctl |= E1000_RCTL_MPE;
4335 wr32(E1000_RCTL, rctl);
4338 ctrl = rd32(E1000_CTRL);
4339 /* advertise wake from D3Cold */
4340 #define E1000_CTRL_ADVD3WUC 0x00100000
4341 /* phy power management enable */
4342 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
4343 ctrl |= E1000_CTRL_ADVD3WUC;
4344 wr32(E1000_CTRL, ctrl);
4346 /* Allow time for pending master requests to run */
4347 igb_disable_pcie_master(&adapter->hw);
4349 wr32(E1000_WUC, E1000_WUC_PME_EN);
4350 wr32(E1000_WUFC, wufc);
4353 wr32(E1000_WUFC, 0);
4356 /* make sure adapter isn't asleep if manageability/wol is enabled */
4357 if (wufc || adapter->en_mng_pt) {
4358 pci_enable_wake(pdev, PCI_D3hot, 1);
4359 pci_enable_wake(pdev, PCI_D3cold, 1);
4361 igb_shutdown_fiber_serdes_link_82575(hw);
4362 pci_enable_wake(pdev, PCI_D3hot, 0);
4363 pci_enable_wake(pdev, PCI_D3cold, 0);
4366 /* Release control of h/w to f/w. If f/w is AMT enabled, this
4367 * would have already happened in close and is redundant. */
4368 igb_release_hw_control(adapter);
4370 pci_disable_device(pdev);
4372 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4378 static int igb_resume(struct pci_dev *pdev)
4380 struct net_device *netdev = pci_get_drvdata(pdev);
4381 struct igb_adapter *adapter = netdev_priv(netdev);
4382 struct e1000_hw *hw = &adapter->hw;
4385 pci_set_power_state(pdev, PCI_D0);
4386 pci_restore_state(pdev);
4388 if (adapter->need_ioport)
4389 err = pci_enable_device(pdev);
4391 err = pci_enable_device_mem(pdev);
4394 "igb: Cannot enable PCI device from suspend\n");
4397 pci_set_master(pdev);
4399 pci_enable_wake(pdev, PCI_D3hot, 0);
4400 pci_enable_wake(pdev, PCI_D3cold, 0);
4402 igb_set_interrupt_capability(adapter);
4404 if (igb_alloc_queues(adapter)) {
4405 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
4409 /* e1000_power_up_phy(adapter); */
4412 wr32(E1000_WUS, ~0);
4414 if (netif_running(netdev)) {
4415 err = igb_open(netdev);
4420 netif_device_attach(netdev);
4422 /* let the f/w know that the h/w is now under the control of the
4424 igb_get_hw_control(adapter);
4430 static void igb_shutdown(struct pci_dev *pdev)
4432 igb_suspend(pdev, PMSG_SUSPEND);
4435 #ifdef CONFIG_NET_POLL_CONTROLLER
4437 * Polling 'interrupt' - used by things like netconsole to send skbs
4438 * without having to re-enable interrupts. It's not called while
4439 * the interrupt routine is executing.
4441 static void igb_netpoll(struct net_device *netdev)
4443 struct igb_adapter *adapter = netdev_priv(netdev);
4447 igb_irq_disable(adapter);
4448 adapter->flags |= IGB_FLAG_IN_NETPOLL;
4450 for (i = 0; i < adapter->num_tx_queues; i++)
4451 igb_clean_tx_irq(&adapter->tx_ring[i]);
4453 for (i = 0; i < adapter->num_rx_queues; i++)
4454 igb_clean_rx_irq_adv(&adapter->rx_ring[i],
4456 adapter->rx_ring[i].napi.weight);
4458 adapter->flags &= ~IGB_FLAG_IN_NETPOLL;
4459 igb_irq_enable(adapter);
4461 #endif /* CONFIG_NET_POLL_CONTROLLER */
4464 * igb_io_error_detected - called when PCI error is detected
4465 * @pdev: Pointer to PCI device
4466 * @state: The current pci connection state
4468 * This function is called after a PCI bus error affecting
4469 * this device has been detected.
4471 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
4472 pci_channel_state_t state)
4474 struct net_device *netdev = pci_get_drvdata(pdev);
4475 struct igb_adapter *adapter = netdev_priv(netdev);
4477 netif_device_detach(netdev);
4479 if (netif_running(netdev))
4481 pci_disable_device(pdev);
4483 /* Request a slot slot reset. */
4484 return PCI_ERS_RESULT_NEED_RESET;
4488 * igb_io_slot_reset - called after the pci bus has been reset.
4489 * @pdev: Pointer to PCI device
4491 * Restart the card from scratch, as if from a cold-boot. Implementation
4492 * resembles the first-half of the igb_resume routine.
4494 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
4496 struct net_device *netdev = pci_get_drvdata(pdev);
4497 struct igb_adapter *adapter = netdev_priv(netdev);
4498 struct e1000_hw *hw = &adapter->hw;
4501 if (adapter->need_ioport)
4502 err = pci_enable_device(pdev);
4504 err = pci_enable_device_mem(pdev);
4507 "Cannot re-enable PCI device after reset.\n");
4508 return PCI_ERS_RESULT_DISCONNECT;
4510 pci_set_master(pdev);
4511 pci_restore_state(pdev);
4513 pci_enable_wake(pdev, PCI_D3hot, 0);
4514 pci_enable_wake(pdev, PCI_D3cold, 0);
4517 wr32(E1000_WUS, ~0);
4519 return PCI_ERS_RESULT_RECOVERED;
4523 * igb_io_resume - called when traffic can start flowing again.
4524 * @pdev: Pointer to PCI device
4526 * This callback is called when the error recovery driver tells us that
4527 * its OK to resume normal operation. Implementation resembles the
4528 * second-half of the igb_resume routine.
4530 static void igb_io_resume(struct pci_dev *pdev)
4532 struct net_device *netdev = pci_get_drvdata(pdev);
4533 struct igb_adapter *adapter = netdev_priv(netdev);
4535 igb_init_manageability(adapter);
4537 if (netif_running(netdev)) {
4538 if (igb_up(adapter)) {
4539 dev_err(&pdev->dev, "igb_up failed after reset\n");
4544 netif_device_attach(netdev);
4546 /* let the f/w know that the h/w is now under the control of the
4548 igb_get_hw_control(adapter);