/*******************************************************************************

  Intel(R) Gigabit Ethernet Linux driver
  Copyright(c) 2007-2009 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"
#define DRV_VERSION "1.3.16-k2"
char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] = "Copyright (c) 2007-2009 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};
static struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
void igb_update_stats(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_setup_rctl(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_multi(struct net_device *);
static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static int igb_xmit_frame_ring_adv(struct sk_buff *, struct net_device *,
				   struct igb_ring *);
static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
static struct net_device_stats *igb_get_stats(struct net_device *);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_rx(int irq, void *);
static irqreturn_t igb_msix_tx(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_rx_dca(struct igb_ring *);
static void igb_update_tx_dca(struct igb_ring *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_ring *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
static void igb_vlan_rx_add_vid(struct net_device *, u16);
static void igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
static inline void igb_set_rah_pool(struct e1000_hw *, int, int);
static void igb_set_mc_list_pools(struct igb_adapter *, int, u16);
static void igb_vmm_control(struct igb_adapter *);
static inline void igb_set_vmolr(struct e1000_hw *, int);
static inline int igb_set_vf_rlpml(struct igb_adapter *, int, int);
static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
static ssize_t igb_set_num_vfs(struct device *, struct device_attribute *,
			       const char *, size_t);
static ssize_t igb_show_num_vfs(struct device *, struct device_attribute *,
				char *);
DEVICE_ATTR(num_vfs, S_IRUGO | S_IWUSR, igb_show_num_vfs, igb_set_num_vfs);
#endif
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
					      pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};
static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = __devexit_p(igb_remove),
#ifdef CONFIG_PM
	/* Power Management Hooks */
	.suspend  = igb_suspend,
	.resume   = igb_resume,
#endif
	.shutdown = igb_shutdown,
	.err_handler = &igb_err_handler
};
static int global_quad_port_a; /* global quad port a indication */

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/*
 * Scale the NIC clock cycle by a large factor so that
 * relatively small clock corrections can be added or
 * subtracted at each clock tick. The drawbacks of a
 * large factor are a) that the clock register overflows
 * more quickly (not such a big deal) and b) that the
 * increment per tick has to fit into 24 bits.
 *
 * Note that
 *   TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
 *             IGB_TSYNC_SCALE
 *   TIMINCA += TIMINCA * adjustment [ppm] / 1e9
 *
 * The base scale factor is intentionally a power of two
 * so that the division in %struct timecounter can be done with
 * a shift.
 */
#define IGB_TSYNC_SHIFT (19)
#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)
/*
 * The duration of one clock cycle of the NIC.
 *
 * @todo This hard-coded value is part of the specification and might change
 * in future hardware revisions. Add revision check.
 */
#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16

#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
#endif
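
/*
 * Editorial sanity check of the guard above: with IGB_TSYNC_SHIFT = 19
 * and a 16 ns cycle time, TIMINCA's increment is 16 * (1 << 19) =
 * 1 << 23 = 8388608 = 0x800000, which still fits in the 24-bit field
 * (limit 1 << 24), so the #error does not trigger.
 */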
/**
 * igb_read_clock - read raw cycle counter (to be used by time counter)
 */
static cycle_t igb_read_clock(const struct cyclecounter *tc)
{
	struct igb_adapter *adapter =
		container_of(tc, struct igb_adapter, cycles);
	struct e1000_hw *hw = &adapter->hw;
	u64 stamp;

	stamp = rd32(E1000_SYSTIML);
	stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;

	return stamp;
}
/**
 * igb_get_hw_dev_name - return device name string
 * used by hardware layer to print debugging information
 **/
char *igb_get_hw_dev_name(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev->name;
}
/**
 * igb_get_time_str - format current NIC and system time as string
 */
static char *igb_get_time_str(struct igb_adapter *adapter,
			      char buffer[160])
{
	cycle_t hw = adapter->cycles.read(&adapter->cycles);
	struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
	struct timespec sys;
	struct timespec delta;
	getnstimeofday(&sys);

	delta = timespec_sub(nic, sys);

	sprintf(buffer,
		"HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
		(unsigned long long)hw,
		(long)nic.tv_sec, nic.tv_nsec,
		(long)sys.tv_sec, sys.tv_nsec,
		(long)delta.tv_sec, delta.tv_nsec);

	return buffer;
}
/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;
	printk(KERN_INFO "%s - version %s\n",
	       igb_driver_string, igb_driver_version);

	printk(KERN_INFO "%s\n", igb_copyright);

	global_quad_port_a = 0;

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif

	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);
/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
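/*
 * Editorial worked example of the macro above: Q_IDX_82576(0) = 0,
 * Q_IDX_82576(1) = 8, Q_IDX_82576(2) = 1, Q_IDX_82576(3) = 9, ...
 * i.e. even indices walk queues 0..7 and odd indices walk queues
 * 8..15, interleaving around the per-VF queue pairs described below.
 */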
/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i;
	unsigned int rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = rbase_offset +
						      Q_IDX_82576(i);
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = rbase_offset +
						      Q_IDX_82576(i);
		break;
	case e1000_82575:
	default:
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].reg_idx = i;
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].reg_idx = i;
		break;
	}
}
/**
 * igb_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
	int i;

	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->tx_ring)
		return -ENOMEM;

	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
				   sizeof(struct igb_ring), GFP_KERNEL);
	if (!adapter->rx_ring) {
		kfree(adapter->tx_ring);
		return -ENOMEM;
	}

	adapter->rx_ring->buddy = adapter->tx_ring;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		ring->count = adapter->tx_ring_count;
		ring->adapter = adapter;
		ring->queue_index = i;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		ring->count = adapter->rx_ring_count;
		ring->adapter = adapter;
		ring->queue_index = i;
		ring->itr_register = E1000_ITR;

		/* set a default napi handler for each rx_ring */
		netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64);
	}

	igb_cache_ring_register(adapter);
	return 0;
}
static void igb_free_queues(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		netif_napi_del(&adapter->rx_ring[i].napi);

	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
}
#define IGB_N0_QUEUE -1
static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
			      int tx_queue, int msix_vector)
{
	u32 msixbm = 0;
	struct e1000_hw *hw = &adapter->hw;
	u32 ivar, index;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector.
		 */
		if (rx_queue > IGB_N0_QUEUE) {
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
			adapter->rx_ring[rx_queue].eims_value = msixbm;
		}
		if (tx_queue > IGB_N0_QUEUE) {
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
			adapter->tx_ring[tx_queue].eims_value =
				  E1000_EICR_TX_QUEUE0 << tx_queue;
		}
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		break;
	case e1000_82576:
		/* 82576 uses a table-based method for assigning vectors.
		 * Each queue has a single entry in the table to which we write
		 * a vector number along with a "valid" bit.  Sadly, the layout
		 * of the table is somewhat counterintuitive.
		 */
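		/* Editorial sketch of an IVAR0 entry as used below (entry n
		 * covers queues 2n and 2n+1):
		 *   byte 0: rx queue 2n      byte 1: tx queue 2n
		 *   byte 2: rx queue 2n+1    byte 3: tx queue 2n+1
		 * Each byte holds the vector number plus the "valid" bit. */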
		if (rx_queue > IGB_N0_QUEUE) {
			index = (rx_queue >> 1) + adapter->vfs_allocated_count;
			ivar = array_rd32(E1000_IVAR0, index);
			if (rx_queue & 0x1) {
				/* vector goes into third byte of register */
				ivar = ivar & 0xFF00FFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
			} else {
				/* vector goes into low byte of register */
				ivar = ivar & 0xFFFFFF00;
				ivar |= msix_vector | E1000_IVAR_VALID;
			}
			adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
			array_wr32(E1000_IVAR0, index, ivar);
		}
		if (tx_queue > IGB_N0_QUEUE) {
			index = (tx_queue >> 1) + adapter->vfs_allocated_count;
			ivar = array_rd32(E1000_IVAR0, index);
			if (tx_queue & 0x1) {
				/* vector goes into high byte of register */
				ivar = ivar & 0x00FFFFFF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
			} else {
				/* vector goes into second byte of register */
				ivar = ivar & 0xFFFF00FF;
				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
			}
			adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
			array_wr32(E1000_IVAR0, index, ivar);
		}
		break;
	default:
		BUG();
		break;
	}
}
/**
 * igb_configure_msix - Configure MSI-X hardware
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;
	if (hw->mac.type == e1000_82576)
		/* Turn on MSI-X capability first, or our settings
		 * won't stick.  And it will take days to debug. */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
				 E1000_GPIE_PBA | E1000_GPIE_EIAME |
				 E1000_GPIE_NSICR);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = &adapter->tx_ring[i];
		igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
		adapter->eims_enable_mask |= tx_ring->eims_value;
		if (tx_ring->itr_val)
			writel(tx_ring->itr_val,
			       hw->hw_addr + tx_ring->itr_register);
		else
			writel(1, hw->hw_addr + tx_ring->itr_register);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *rx_ring = &adapter->rx_ring[i];
		rx_ring->buddy = NULL;
		igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
		adapter->eims_enable_mask |= rx_ring->eims_value;
		if (rx_ring->itr_val)
			writel(rx_ring->itr_val,
			       hw->hw_addr + rx_ring->itr_register);
		else
			writel(1, hw->hw_addr + rx_ring->itr_register);
	}

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		array_wr32(E1000_MSIXBM(0), vector++,
			   E1000_EIMS_OTHER);

		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);
		adapter->eims_enable_mask |= E1000_EIMS_OTHER;
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
		tmp = (vector++ | E1000_IVAR_VALID) << 8;
		wr32(E1000_IVAR_MISC, tmp);

		adapter->eims_enable_mask = (1 << (vector)) - 1;
		adapter->eims_other = 1 << (vector - 1);
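		/* Editorial example: with 8 queue vectors the MISC vector
		 * above makes vector 9, so eims_enable_mask = (1 << 9) - 1 =
		 * 0x1FF and eims_other = 1 << 8 = 0x100 (the "other" bit). */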
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */
}
/**
 * igb_request_msix - Initialize MSI-X interrupts
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &(adapter->tx_ring[i]);
		sprintf(ring->name, "%s-tx-%d", netdev->name, i);
		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_tx, 0, ring->name,
				  &(adapter->tx_ring[i]));
		if (err)
			goto out;
		ring->itr_register = E1000_EITR(0) + (vector << 2);
		ring->itr_val = 976; /* ~4000 ints/sec */
		vector++;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &(adapter->rx_ring[i]);
		if (strlen(netdev->name) < (IFNAMSIZ - 5))
			sprintf(ring->name, "%s-rx-%d", netdev->name, i);
		else
			memcpy(ring->name, netdev->name, IFNAMSIZ);
		err = request_irq(adapter->msix_entries[vector].vector,
				  &igb_msix_rx, 0, ring->name,
				  &(adapter->rx_ring[i]));
		if (err)
			goto out;
		ring->itr_register = E1000_EITR(0) + (vector << 2);
		ring->itr_val = adapter->itr;
		vector++;
	}

	err = request_irq(adapter->msix_entries[vector].vector,
			  &igb_msix_other, 0, netdev->name, netdev);
	if (err)
		goto out;

	igb_configure_msix(adapter);
	return 0;
out:
	return err;
}
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);

	return;
}
/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter)
{
	int err;
	int numvecs, i;

	/* Number of supported queues. */
	/* Having more queues than CPUs doesn't make sense. */
	adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
	adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus());

	numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1;
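	/* Editorial example: with 4 Tx and 4 Rx queues this asks for
	 * 4 + 4 + 1 = 9 vectors; the extra vector handles link changes
	 * and other non-queue causes. */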
	adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		goto msi_only;

	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix(adapter->pdev,
			      adapter->msix_entries,
			      numvecs);
	if (err == 0)
		goto out;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
out:
	/* Notify the stack of the (possibly) reduced Tx Queue count. */
	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
}
/**
 * igb_request_irq - initialize interrupts
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	int err = 0;

	if (adapter->msix_entries) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_reset_interrupt_capability(adapter);
		if (!pci_enable_msi(adapter->pdev))
			adapter->flags |= IGB_FLAG_HAS_MSI;
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);
		adapter->num_rx_queues = 1;
		igb_alloc_queues(adapter);
	} else {
		switch (hw->mac.type) {
		case e1000_82575:
			wr32(E1000_MSIXBM(0),
			     (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER));
			break;
		case e1000_82576:
			wr32(E1000_IVAR0, E1000_IVAR_VALID);
			break;
		default:
			break;
		}
	}

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
				  netdev->name, netdev);
		if (!err)
			goto request_done;
		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
			  netdev->name, netdev);

	if (err)
		dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}
static void igb_free_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->msix_entries) {
		int vector = 0, i;

		for (i = 0; i < adapter->num_tx_queues; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 &(adapter->tx_ring[i]));
		for (i = 0; i < adapter->num_rx_queues; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 &(adapter->rx_ring[i]));

		free_irq(adapter->msix_entries[vector++].vector, netdev);
		return;
	}

	free_irq(adapter->pdev->irq, netdev);
}
/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		wr32(E1000_EIAM, 0);
		wr32(E1000_EIMC, ~0);
		wr32(E1000_EIAC, 0);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	synchronize_irq(adapter->pdev->irq);
}
/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix_entries) {
		wr32(E1000_EIAC, adapter->eims_enable_mask);
		wr32(E1000_EIAM, adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count)
			wr32(E1000_MBVFIMR, 0xFF);
		wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB |
				 E1000_IMS_DOUTSYNC));
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK);
		wr32(E1000_IAM, IMS_ENABLE_MASK);
	}
}
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;
	if (adapter->vlgrp) {
		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
			if (adapter->hw.mng_cookie.status &
			    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
				igb_vlan_rx_add_vid(netdev, vid);
				adapter->mng_vlan_id = vid;
			} else
				adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;

			if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
			    (vid != old_vid) &&
			    !vlan_group_get_device(adapter->vlgrp, old_vid))
				igb_vlan_rx_kill_vid(netdev, old_vid);
		} else
			adapter->mng_vlan_id = vid;
	}
}
/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 *
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}
/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 *
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_multi(netdev);

	igb_restore_vlan(adapter);

	igb_configure_tx(adapter);
	igb_setup_rctl(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call IGB_DESC_UNUSED which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = &adapter->rx_ring[i];
		igb_alloc_rx_buffers_adv(ring, IGB_DESC_UNUSED(ring));
	}

	adapter->tx_queue_len = netdev->tx_queue_len;
}
/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_ring[i].napi);
	if (adapter->msix_entries)
		igb_configure_msix(adapter);

	igb_vmm_control(adapter);
	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* Fire a link change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);
	return 0;
}
void igb_down(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	msleep(10);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_disable(&adapter->rx_ring[i].napi);

	igb_irq_disable(adapter);

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	netdev->tx_queue_len = adapter->tx_queue_len;
	netif_carrier_off(netdev);

	/* record the stats before reset */
	igb_update_stats(adapter);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);
	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
}
void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		msleep(1);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}
void igb_reset(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
	u16 hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_82576:
		pba = E1000_PBA_64K;
		break;
	case e1000_82575:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
	    (mac->type < e1000_82576)) {
		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB.  Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB. */
		pba = rd32(E1000_PBA);
		/* upper 16 bits has Tx packet buffer allocation size in KB */
		tx_space = pba >> 16;
		/* lower 16 bits has Rx packet buffer allocation size in KB */
		pba &= 0xffff;
		/* the tx fifo also stores 16 bytes of information about the tx
		 * but don't include ethernet FCS because hardware appends it */
		min_tx_space = (adapter->max_frame_size +
				sizeof(union e1000_adv_tx_desc) -
				ETH_FCS_LEN) * 2;
		min_tx_space = ALIGN(min_tx_space, 1024);
		min_tx_space >>= 10;
		/* software strips receive CRC, so leave room for it */
		min_rx_space = adapter->max_frame_size;
		min_rx_space = ALIGN(min_rx_space, 1024);
		min_rx_space >>= 10;

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation */
		if (tx_space < min_tx_space &&
		    ((min_tx_space - tx_space) < pba)) {
			pba = pba - (min_tx_space - tx_space);

			/* if short on rx space, rx wins and must trump tx
			 * adjustment */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}
		wr32(E1000_PBA, pba);
	}

	/* flow control settings */
	/* The high water mark must be low enough to fit one full frame
	 * (or the size used for early receive) above it in the Rx FIFO.
	 * Set it to the lower of:
	 * - 90% of the Rx FIFO size, or
	 * - the full Rx FIFO size minus one full frame */
	hwm = min(((pba << 10) * 9 / 10),
		  ((pba << 10) - 2 * adapter->max_frame_size));
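	/* Editorial worked example (illustrative values): with a 34 KB Rx
	 * FIFO and max_frame_size = 1522, hwm = min(34816 * 9 / 10,
	 * 34816 - 2 * 1522) = min(31334, 31772) = 31334 bytes. */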
	if (mac->type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}

	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->type = fc->original_type;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;
		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].clear_to_send = false;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	adapter->hw.mac.ops.reset_hw(&adapter->hw);
	wr32(E1000_WUC, 0);

	if (adapter->hw.mac.ops.init_hw(&adapter->hw))
		dev_err(&adapter->pdev->dev, "Hardware Error\n");

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	igb_reset_adaptive(&adapter->hw);
	igb_get_phy_info(&adapter->hw);
}
static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame_adv,
	.ndo_get_stats		= igb_get_stats,
	.ndo_set_multicast_list	= igb_set_multi,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= igb_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
};
/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int __devinit igb_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	struct pci_dev *us_dev;
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	unsigned long mmio_start, mmio_len;
	int err, pci_using_dac, pos;
	u16 eeprom_data = 0, state = 0;
	u16 eeprom_apme_mask = IGB_EEPROM_APME;
	u32 part_num;
	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (!err)
			pci_using_dac = 1;
	} else {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (err) {
				dev_err(&pdev->dev, "No usable DMA "
					"configuration, aborting\n");
				goto err_dma;
			}
		}
	}

	/* 82575 requires that the pci-e link partner disable the L0s state */
	switch (pdev->device) {
	case E1000_DEV_ID_82575EB_COPPER:
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		us_dev = pdev->bus->self;
		pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
		if (pos) {
			pci_read_config_word(us_dev, pos + PCI_EXP_LNKCTL,
					     &state);
			state &= ~PCIE_LINK_STATE_L0S;
			pci_write_config_word(us_dev, pos + PCI_EXP_LNKCTL,
					      state);
			dev_info(&pdev->dev,
				 "Disabling ASPM L0s upstream switch port %s\n",
				 pci_name(us_dev));
		}
	default:
		break;
	}
	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM),
					   igb_driver_name);
	if (err)
		goto err_pci_reg;

	err = pci_enable_pcie_error_reporting(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
			"0x%x\n", err);
		/* non-fatal, continue */
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_ABS_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE;

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	hw->hw_addr = ioremap(mmio_start, mmio_len);
	if (!hw->hw_addr)
		goto err_ioremap;
	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = mmio_start;
	netdev->mem_end = mmio_start + mmio_len;

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* setup the private structure */

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;
	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	/* set flags */
	switch (hw->mac.type) {
	case e1000_82575:
		adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
		break;
	default:
		break;
	}

	hw->phy.autoneg_wait_to_complete = false;
	hw->mac.adaptive_ifs = true;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");
	netdev->features = NETIF_F_SG |
			   NETIF_F_IP_CSUM |
			   NETIF_F_HW_VLAN_TX |
			   NETIF_F_HW_VLAN_RX |
			   NETIF_F_HW_VLAN_FILTER;

	netdev->features |= NETIF_F_IPV6_CSUM;
	netdev->features |= NETIF_F_TSO;
	netdev->features |= NETIF_F_TSO6;

	netdev->features |= NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_SG;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;
	adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good */
	if (igb_validate_nvm_checksum(hw) < 0) {
		dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
		err = -EIO;
		goto err_eeprom;
	}

	/* copy the MAC address out of the NVM */
	if (hw->mac.ops.read_mac_addr(hw))
		dev_err(&pdev->dev, "NVM Read Error\n");

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->perm_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}
	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &igb_watchdog;
	adapter->watchdog_timer.data = (unsigned long) adapter;

	init_timer(&adapter->phy_info_timer);
	adapter->phy_info_timer.function = &igb_update_phy_info;
	adapter->phy_info_timer.data = (unsigned long) adapter;

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.original_type = e1000_fc_default;
	hw->fc.type = e1000_fc_default;

	adapter->itr_setting = IGB_DEFAULT_ITR;
	adapter->itr = IGB_START_ITR;

	igb_validate_mdi_setting(hw);

	adapter->rx_csum = 1;
	/* Initial Wake on LAN setting: if APM wake is enabled in the EEPROM,
	 * enable the ACPI Magic Packet filter
	 */

	if (hw->bus.func == 0)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & eeprom_apme_mask)
		adapter->eeprom_wol |= E1000_WUFC_MAG;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->eeprom_wol = 0;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->eeprom_wol = 0;
		break;
	}

	/* initialize the wol settings based on the eeprom settings */
	adapter->wol = adapter->eeprom_wol;
	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver. */
	igb_get_hw_control(adapter);

	/* tell the stack to leave us alone until igb_open() is called */
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

#ifdef CONFIG_PCI_IOV
	/* since iov functionality isn't critical to base device function we
	 * can accept failure.  If it fails we don't allow iov to be enabled */
	if (hw->mac.type == e1000_82576) {
		err = pci_enable_sriov(pdev, 0);
		if (!err)
			err = device_create_file(&netdev->dev,
						 &dev_attr_num_vfs);
		if (err)
			dev_err(&pdev->dev, "Failed to initialize IOV\n");
	}

#endif
#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		/* Always use CB2 mode, difference is masked
		 * in the CB driver. */
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
		igb_setup_dca(adapter);
	}
#endif
	/*
	 * Initialize hardware timer: we keep it running just in case
	 * that some program needs it later on.
	 */
	memset(&adapter->cycles, 0, sizeof(adapter->cycles));
	adapter->cycles.read = igb_read_clock;
	adapter->cycles.mask = CLOCKSOURCE_MASK(64);
	adapter->cycles.mult = 1;
	adapter->cycles.shift = IGB_TSYNC_SHIFT;
	wr32(E1000_TIMINCA,
	     (1 << 24) |
	     IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
#if 0
	/*
	 * Avoid rollover while we initialize by resetting the time counter.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0x00000000);
#else
	/*
	 * Set registers so that rollover occurs soon to test this.
	 */
	wr32(E1000_SYSTIML, 0x00000000);
	wr32(E1000_SYSTIMH, 0xFF800000);
#endif

	timecounter_init(&adapter->clock,
			 &adapter->cycles,
			 ktime_to_ns(ktime_get_real()));

	/*
	 * Synchronize our NIC clock against system wall clock. NIC
	 * time stamp reading requires ~3us per sample, each sample
	 * was pretty stable even under load => only require 10
	 * samples for each offset comparison.
	 */
	memset(&adapter->compare, 0, sizeof(adapter->compare));
	adapter->compare.source = &adapter->clock;
	adapter->compare.target = ktime_get_real;
	adapter->compare.num_samples = 10;
	timecompare_update(&adapter->compare, 0);

#ifdef DEBUG
	{
		char buffer[160];
		printk(KERN_DEBUG
			"igb: %s: hw %p initialized timer\n",
			igb_get_time_str(adapter, buffer),
			&adapter->hw);
	}
#endif

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info */
	dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
		 netdev->name,
		 ((hw->bus.speed == e1000_bus_speed_2500)
		  ? "2.5Gb/s" : "unknown"),
		 ((hw->bus.width == e1000_bus_width_pcie_x4)
		  ? "Width x4" : (hw->bus.width == e1000_bus_width_pcie_x1)
		  ? "Width x1" : "unknown"),
		 netdev->dev_addr);

	igb_read_part_num(hw, &part_num);
	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
		 (part_num >> 8), (part_num & 0xff));

	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 adapter->msix_entries ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);

	return 0;

err_register:
	igb_release_hw_control(adapter);
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);

	igb_free_queues(adapter);
err_sw_init:
	iounmap(hw->hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void __devexit igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* flush_scheduled work may reschedule our watchdog task, so
	 * explicitly disable watchdog tasks from being rescheduled */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	flush_scheduled_work();

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant. */
	igb_release_hw_control(adapter);

	unregister_netdev(netdev);

	if (!igb_check_reset_block(&adapter->hw))
		igb_reset_phy(&adapter->hw);

	igb_reset_interrupt_capability(adapter);

	igb_free_queues(adapter);

#ifdef CONFIG_PCI_IOV
	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(pdev);
		msleep(500);

		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");
	}
#endif
	iounmap(hw->hw_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_selected_regions(pdev, pci_select_bars(pdev,
				     IORESOURCE_MEM));

	free_netdev(netdev);

	err = pci_disable_pcie_error_reporting(pdev);
	if (err)
		dev_err(&pdev->dev,
			"pci_disable_pcie_error_reporting failed 0x%x\n", err);

	pci_disable_device(pdev);
}
/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int __devinit igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;
	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
	adapter->rx_ps_hdr_size = 0; /* disable packet split */
	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	/* This call may decrease the number of queues depending on
	 * interrupt mode. */
	igb_set_interrupt_capability(adapter);

	if (igb_alloc_queues(adapter)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}
/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int igb_open(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state))
		return -EBUSY;

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* e1000_power_up_phy(adapter); */

	adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
		igb_update_mng_vlan(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so. */
	igb_configure(adapter);

	igb_vmm_control(adapter);
	igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
	igb_set_vmolr(hw, adapter->vfs_allocated_count);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_rx_queues; i++)
		napi_enable(&adapter->rx_ring[i].napi);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	netif_tx_start_all_queues(netdev);

	/* Fire a link status change interrupt to start the watchdog. */
	wr32(E1000_ICS, E1000_ICS_LSC);

	return 0;

err_req_irq:
	igb_release_hw_control(adapter);
	/* e1000_power_down_phy(adapter); */
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);

	return err;
}
/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int igb_close(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
	igb_down(adapter);

	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	/* kill manageability vlan ID if supported, but not if a vlan with
	 * the same ID is registered on the host OS (let 8021q kill it) */
	if ((adapter->hw.mng_cookie.status &
	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
	    !(adapter->vlgrp &&
	      vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
		igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);

	return 0;
}
/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_adapter *adapter,
			   struct igb_ring *tx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size;

	size = sizeof(struct igb_buffer) * tx_ring->count;
	tx_ring->buffer_info = vmalloc(size);
	if (!tx_ring->buffer_info)
		goto err;
	memset(tx_ring->buffer_info, 0, size);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);
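	/* Editorial note: each advanced Tx descriptor is 16 bytes, so a
	 * 256-entry ring needs exactly 4096 bytes and the ALIGN above is
	 * a no-op; other counts round up to the next 4 KB boundary. */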
	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
					     &tx_ring->dma);
	if (!tx_ring->desc)
		goto err;

	tx_ring->adapter = adapter;
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	return 0;

err:
	vfree(tx_ring->buffer_info);
	dev_err(&adapter->pdev->dev,
		"Unable to allocate memory for the transmit descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;
	int r_idx;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(&adapter->tx_ring[i]);
			break;
		}
	}

	for (i = 0; i < IGB_MAX_TX_QUEUES; i++) {
		r_idx = i % adapter->num_tx_queues;
		adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
	}
	return err;
}
/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	u64 tdba;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;
	u32 txdctl, txctrl;
	int i, j;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = &adapter->tx_ring[i];
		j = ring->reg_idx;
		wr32(E1000_TDLEN(j),
		     ring->count * sizeof(union e1000_adv_tx_desc));
		tdba = ring->dma;
		wr32(E1000_TDBAL(j),
		     tdba & 0x00000000ffffffffULL);
		wr32(E1000_TDBAH(j), tdba >> 32);

		ring->head = E1000_TDH(j);
		ring->tail = E1000_TDT(j);
		writel(0, hw->hw_addr + ring->tail);
		writel(0, hw->hw_addr + ring->head);
		txdctl = rd32(E1000_TXDCTL(j));
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		wr32(E1000_TXDCTL(j), txdctl);

		/* Turn off Relaxed Ordering on head write-backs.  The
		 * writebacks MUST be delivered in order or it will
		 * completely screw up our bookkeeping.
		 */
		txctrl = rd32(E1000_DCA_TXCTRL(j));
		txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
		wr32(E1000_DCA_TXCTRL(j), txctrl);
	}

	/* disable queue 0 to prevent tail bump w/o re-configuration */
	if (adapter->vfs_allocated_count)
		wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Setup Transmit Descriptor Settings for eop descriptor */
	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
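	/* Editorial note: EOP marks the last descriptor of a packet and RS
	 * asks the hardware to report descriptor status on write-back, which
	 * the Tx cleanup path relies on. */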
	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}
/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_adapter *adapter,
			   struct igb_ring *rx_ring)
{
	struct pci_dev *pdev = adapter->pdev;
	int size, desc_len;

	size = sizeof(struct igb_buffer) * rx_ring->count;
	rx_ring->buffer_info = vmalloc(size);
	if (!rx_ring->buffer_info)
		goto err;
	memset(rx_ring->buffer_info, 0, size);

	desc_len = sizeof(union e1000_adv_rx_desc);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * desc_len;
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
					     &rx_ring->dma);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	rx_ring->adapter = adapter;

	return 0;

err:
	vfree(rx_ring->buffer_info);
	dev_err(&adapter->pdev->dev, "Unable to allocate memory for "
		"the receive descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]);
		if (err) {
			dev_err(&adapter->pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(&adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
static void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u32 srrctl = 0;
	int i, j;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/*
	 * enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/*
	 * disable store bad packets and clear size bits.
	 */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to prevent packets larger than max_frame_size */
	rctl |= E1000_RCTL_LPE;

	/* Setup buffer sizes */
	switch (adapter->rx_buffer_len) {
	case IGB_RXBUFFER_256:
		rctl |= E1000_RCTL_SZ_256;
		break;
	case IGB_RXBUFFER_512:
		rctl |= E1000_RCTL_SZ_512;
		break;
	default:
		srrctl = ALIGN(adapter->rx_buffer_len, 1024)
			 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		break;
	}
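	/* Editorial example: in the default case above SRRCTL takes the
	 * buffer size in 1 KB units, so a 2048-byte rx_buffer_len programs
	 * the value 2. */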
	/* 82575 and greater support packet-split where the protocol
	 * header is placed in skb->data and the packet data is
	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
	 * In the case of a non-split, skb->data is linearly filled,
	 * followed by the page buffers.  Therefore, skb->data is
	 * sized to hold the largest protocol header.
	 */
	/* allocations using alloc_page take too long for regular MTU
	 * so only enable packet split for jumbo frames */
	if (adapter->netdev->mtu > ETH_DATA_LEN) {
		adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
		srrctl |= adapter->rx_ps_hdr_size <<
			  E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else {
		adapter->rx_ps_hdr_size = 0;
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	}

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		u32 vmolr;

		j = adapter->rx_ring[0].reg_idx;

		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
		srrctl |= E1000_SRRCTL_DROP_EN;

		/* disable queue 0 to prevent tail write w/o re-config */
		wr32(E1000_RXDCTL(0), 0);

		vmolr = rd32(E1000_VMOLR(j));
		if (rctl & E1000_RCTL_LPE)
			vmolr |= E1000_VMOLR_LPE;
		if (adapter->num_rx_queues > 0)
			vmolr |= E1000_VMOLR_RSSE;
		wr32(E1000_VMOLR(j), vmolr);
	}

	for (i = 0; i < adapter->num_rx_queues; i++) {
		j = adapter->rx_ring[i].reg_idx;
		wr32(E1000_SRRCTL(j), srrctl);
	}

	wr32(E1000_RCTL, rctl);
}
/**
 * igb_rlpml_set - set maximum receive packet size
 * @adapter: board private structure
 *
 * Configure maximum receivable packet size.
 **/
static void igb_rlpml_set(struct igb_adapter *adapter)
{
	u32 max_frame_size = adapter->max_frame_size;
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;

	if (adapter->vlgrp)
		max_frame_size += VLAN_TAG_SIZE;

	/* if vfs are enabled we set RLPML to the largest possible request
	 * size and set the VMOLR RLPML to the size we need */
	if (pf_id) {
		igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
		max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE;
	}

	wr32(E1000_RLPML, max_frame_size);
}
2082 * igb_configure_vt_default_pool - Configure VT default pool
2083 * @adapter: board private structure
2085 * Configure the default pool
2087 static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
2089 struct e1000_hw *hw = &adapter->hw;
2090 u16 pf_id = adapter->vfs_allocated_count;
2093 /* not in sr-iov mode - do nothing */
2097 vtctl = rd32(E1000_VT_CTL);
2098 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2099 E1000_VT_CTL_DISABLE_DEF_POOL);
2100 vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2101 wr32(E1000_VT_CTL, vtctl);
2105 * igb_configure_rx - Configure receive Unit after Reset
2106 * @adapter: board private structure
2108 * Configure the Rx unit of the MAC after a reset.
2110 static void igb_configure_rx(struct igb_adapter *adapter)
2113 struct e1000_hw *hw = &adapter->hw;
2118 /* disable receives while setting up the descriptors */
2119 rctl = rd32(E1000_RCTL);
2120 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2124 if (adapter->itr_setting > 3)
2125 wr32(E1000_ITR, adapter->itr);
2127 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2128 * the Base and Length of the Rx Descriptor Ring */
2129 for (i = 0; i < adapter->num_rx_queues; i++) {
2130 struct igb_ring *ring = &adapter->rx_ring[i];
2131 int j = ring->reg_idx;
2133 wr32(E1000_RDBAL(j),
2134 rdba & 0x00000000ffffffffULL);
2135 wr32(E1000_RDBAH(j), rdba >> 32);
2136 wr32(E1000_RDLEN(j),
2137 ring->count * sizeof(union e1000_adv_rx_desc));
2139 ring->head = E1000_RDH(j);
2140 ring->tail = E1000_RDT(j);
2141 writel(0, hw->hw_addr + ring->tail);
2142 writel(0, hw->hw_addr + ring->head);
2144 rxdctl = rd32(E1000_RXDCTL(j));
2145 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2146 rxdctl &= 0xFFF00000;
2147 rxdctl |= IGB_RX_PTHRESH;
2148 rxdctl |= IGB_RX_HTHRESH << 8;
2149 rxdctl |= IGB_RX_WTHRESH << 16;
2150 wr32(E1000_RXDCTL(j), rxdctl);
2153 if (adapter->num_rx_queues > 1) {
2162 get_random_bytes(&random[0], 40);
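/* 40 random bytes fill the ten 32-bit RSSRK registers written below,
 * forming the RSS hash key */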
2164 if (hw->mac.type >= e1000_82576)
2168 for (j = 0; j < (32 * 4); j++) {
2170 adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
2173 hw->hw_addr + E1000_RETA(0) + (j & ~3));
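/* the RETA redirection table holds 128 one-byte entries, written one
 * dword (four entries) at a time; the low bits of a packet's RSS hash
 * index into it to select the receive queue. The shift accounts for
 * where the queue index sits within each entry on older parts. */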
2175 if (adapter->vfs_allocated_count)
2176 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2178 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2180 /* Fill out hash function seeds */
2181 for (j = 0; j < 10; j++)
2182 array_wr32(E1000_RSSRK(0), j, random[j]);
2184 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2185 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2186 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2187 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2188 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2189 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2190 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2191 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2194 wr32(E1000_MRQC, mrqc);
2196 /* Multiqueue and raw packet checksumming are mutually
2197 * exclusive. Note that this is not the same as TCP/IP
2198 * checksumming, which works fine. */
2199 rxcsum = rd32(E1000_RXCSUM);
2200 rxcsum |= E1000_RXCSUM_PCSD;
2201 wr32(E1000_RXCSUM, rxcsum);
2203 /* Enable multi-queue for sr-iov */
2204 if (adapter->vfs_allocated_count)
2205 wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
2206 /* Enable Receive Checksum Offload for TCP and UDP */
2207 rxcsum = rd32(E1000_RXCSUM);
2208 if (adapter->rx_csum)
2209 rxcsum |= E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE;
2211 rxcsum &= ~(E1000_RXCSUM_TUOFL | E1000_RXCSUM_IPPCSE);
2213 wr32(E1000_RXCSUM, rxcsum);
2216 /* Set the default pool for the PF's first queue */
2217 igb_configure_vt_default_pool(adapter);
2219 igb_rlpml_set(adapter);
2221 /* Enable Receives */
2222 wr32(E1000_RCTL, rctl);
2226 * igb_free_tx_resources - Free Tx Resources per Queue
2227 * @tx_ring: Tx descriptor ring for a specific queue
2229 * Free all transmit software resources
2231 void igb_free_tx_resources(struct igb_ring *tx_ring)
2233 struct pci_dev *pdev = tx_ring->adapter->pdev;
2235 igb_clean_tx_ring(tx_ring);
2237 vfree(tx_ring->buffer_info);
2238 tx_ring->buffer_info = NULL;
2240 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2242 tx_ring->desc = NULL;
2246 * igb_free_all_tx_resources - Free Tx Resources for All Queues
2247 * @adapter: board private structure
2249 * Free all transmit software resources
2251 static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2255 for (i = 0; i < adapter->num_tx_queues; i++)
2256 igb_free_tx_resources(&adapter->tx_ring[i]);
2259 static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2260 struct igb_buffer *buffer_info)
2262 if (buffer_info->dma) {
2263 pci_unmap_page(adapter->pdev,
2265 buffer_info->length,
2267 buffer_info->dma = 0;
2269 if (buffer_info->skb) {
2270 dev_kfree_skb_any(buffer_info->skb);
2271 buffer_info->skb = NULL;
2273 buffer_info->time_stamp = 0;
2274 buffer_info->next_to_watch = 0;
2275 /* buffer_info must be completely set up in the transmit path */
2279 * igb_clean_tx_ring - Free Tx Buffers
2280 * @tx_ring: ring to be cleaned
2282 static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2284 struct igb_adapter *adapter = tx_ring->adapter;
2285 struct igb_buffer *buffer_info;
2289 if (!tx_ring->buffer_info)
2291 /* Free all the Tx ring sk_buffs */
2293 for (i = 0; i < tx_ring->count; i++) {
2294 buffer_info = &tx_ring->buffer_info[i];
2295 igb_unmap_and_free_tx_resource(adapter, buffer_info);
2298 size = sizeof(struct igb_buffer) * tx_ring->count;
2299 memset(tx_ring->buffer_info, 0, size);
2301 /* Zero out the descriptor ring */
2303 memset(tx_ring->desc, 0, tx_ring->size);
2305 tx_ring->next_to_use = 0;
2306 tx_ring->next_to_clean = 0;
2308 writel(0, adapter->hw.hw_addr + tx_ring->head);
2309 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2313 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
2314 * @adapter: board private structure
2316 static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2320 for (i = 0; i < adapter->num_tx_queues; i++)
2321 igb_clean_tx_ring(&adapter->tx_ring[i]);
2325 * igb_free_rx_resources - Free Rx Resources
2326 * @rx_ring: ring to clean the resources from
2328 * Free all receive software resources
2330 void igb_free_rx_resources(struct igb_ring *rx_ring)
2332 struct pci_dev *pdev = rx_ring->adapter->pdev;
2334 igb_clean_rx_ring(rx_ring);
2336 vfree(rx_ring->buffer_info);
2337 rx_ring->buffer_info = NULL;
2339 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2341 rx_ring->desc = NULL;
2345 * igb_free_all_rx_resources - Free Rx Resources for All Queues
2346 * @adapter: board private structure
2348 * Free all receive software resources
2350 static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2354 for (i = 0; i < adapter->num_rx_queues; i++)
2355 igb_free_rx_resources(&adapter->rx_ring[i]);
2359 * igb_clean_rx_ring - Free Rx Buffers per Queue
2360 * @rx_ring: ring to free buffers from
2362 static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2364 struct igb_adapter *adapter = rx_ring->adapter;
2365 struct igb_buffer *buffer_info;
2366 struct pci_dev *pdev = adapter->pdev;
2370 if (!rx_ring->buffer_info)
2372 /* Free all the Rx ring sk_buffs */
2373 for (i = 0; i < rx_ring->count; i++) {
2374 buffer_info = &rx_ring->buffer_info[i];
2375 if (buffer_info->dma) {
2376 if (adapter->rx_ps_hdr_size)
2377 pci_unmap_single(pdev, buffer_info->dma,
2378 adapter->rx_ps_hdr_size,
2379 PCI_DMA_FROMDEVICE);
2381 pci_unmap_single(pdev, buffer_info->dma,
2382 adapter->rx_buffer_len,
2383 PCI_DMA_FROMDEVICE);
2384 buffer_info->dma = 0;
2387 if (buffer_info->skb) {
2388 dev_kfree_skb(buffer_info->skb);
2389 buffer_info->skb = NULL;
2391 if (buffer_info->page) {
2392 if (buffer_info->page_dma)
2393 pci_unmap_page(pdev, buffer_info->page_dma,
2395 PCI_DMA_FROMDEVICE);
2396 put_page(buffer_info->page);
2397 buffer_info->page = NULL;
2398 buffer_info->page_dma = 0;
2399 buffer_info->page_offset = 0;
2403 size = sizeof(struct igb_buffer) * rx_ring->count;
2404 memset(rx_ring->buffer_info, 0, size);
2406 /* Zero out the descriptor ring */
2407 memset(rx_ring->desc, 0, rx_ring->size);
2409 rx_ring->next_to_clean = 0;
2410 rx_ring->next_to_use = 0;
2412 writel(0, adapter->hw.hw_addr + rx_ring->head);
2413 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2417 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
2418 * @adapter: board private structure
2420 static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
2424 for (i = 0; i < adapter->num_rx_queues; i++)
2425 igb_clean_rx_ring(&adapter->rx_ring[i]);
2429 * igb_set_mac - Change the Ethernet Address of the NIC
2430 * @netdev: network interface device structure
2431 * @p: pointer to an address structure
2433 * Returns 0 on success, negative on failure
2435 static int igb_set_mac(struct net_device *netdev, void *p)
2437 struct igb_adapter *adapter = netdev_priv(netdev);
2438 struct e1000_hw *hw = &adapter->hw;
2439 struct sockaddr *addr = p;
2441 if (!is_valid_ether_addr(addr->sa_data))
2442 return -EADDRNOTAVAIL;
2444 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2445 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2447 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
2449 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
2455 * igb_set_multi - Multicast and Promiscuous mode set
2456 * @netdev: network interface device structure
2458 * The set_multi entry point is called whenever the multicast address
2459 * list or the network interface flags are updated. This routine is
2460 * responsible for configuring the hardware for proper multicast,
2461 * promiscuous mode, and all-multi behavior.
2463 static void igb_set_multi(struct net_device *netdev)
2465 struct igb_adapter *adapter = netdev_priv(netdev);
2466 struct e1000_hw *hw = &adapter->hw;
2467 struct e1000_mac_info *mac = &hw->mac;
2468 struct dev_mc_list *mc_ptr;
2473 /* Check for Promiscuous and All Multicast modes */
2475 rctl = rd32(E1000_RCTL);
2477 if (netdev->flags & IFF_PROMISC) {
2478 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2479 rctl &= ~E1000_RCTL_VFE;
2481 if (netdev->flags & IFF_ALLMULTI) {
2482 rctl |= E1000_RCTL_MPE;
2483 rctl &= ~E1000_RCTL_UPE;
2485 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2486 rctl |= E1000_RCTL_VFE;
2488 wr32(E1000_RCTL, rctl);
2490 if (!netdev->mc_count) {
2491 /* nothing to program, so clear mc list */
2492 igb_update_mc_addr_list(hw, NULL, 0, 1,
2493 mac->rar_entry_count);
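/* the packed list below uses six bytes (ETH_ALEN) per multicast address */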
2497 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
2501 /* The shared function expects a packed array of only addresses. */
2502 mc_ptr = netdev->mc_list;
2504 for (i = 0; i < netdev->mc_count; i++) {
2507 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2508 mc_ptr = mc_ptr->next;
2510 igb_update_mc_addr_list(hw, mta_list, i,
2511 adapter->vfs_allocated_count + 1,
2512 mac->rar_entry_count);
2514 igb_set_mc_list_pools(adapter, i, mac->rar_entry_count);
2515 igb_restore_vf_multicasts(adapter);
2520 /* Need to wait a few seconds after link up to get diagnostic information from
2522 static void igb_update_phy_info(unsigned long data)
2524 struct igb_adapter *adapter = (struct igb_adapter *) data;
2525 igb_get_phy_info(&adapter->hw);
2529 * igb_has_link - check shared code for link and determine up/down
2530 * @adapter: pointer to driver private info
2532 static bool igb_has_link(struct igb_adapter *adapter)
2534 struct e1000_hw *hw = &adapter->hw;
2535 bool link_active = false;
2538 /* get_link_status is set on LSC (link status) interrupt or
2539 * rx sequence error interrupt. get_link_status will stay
2540 * false until the e1000_check_for_link establishes link
2541 * for copper adapters ONLY
2543 switch (hw->phy.media_type) {
2544 case e1000_media_type_copper:
2545 if (hw->mac.get_link_status) {
2546 ret_val = hw->mac.ops.check_for_link(hw);
2547 link_active = !hw->mac.get_link_status;
2552 case e1000_media_type_fiber:
2553 ret_val = hw->mac.ops.check_for_link(hw);
2554 link_active = !!(rd32(E1000_STATUS) & E1000_STATUS_LU);
2556 case e1000_media_type_internal_serdes:
2557 ret_val = hw->mac.ops.check_for_link(hw);
2558 link_active = hw->mac.serdes_has_link;
2561 case e1000_media_type_unknown:
2569 * igb_watchdog - Timer Call-back
2570 * @data: pointer to adapter cast into an unsigned long
2572 static void igb_watchdog(unsigned long data)
2574 struct igb_adapter *adapter = (struct igb_adapter *)data;
2575 /* Do the rest outside of interrupt context */
2576 schedule_work(&adapter->watchdog_task);
2579 static void igb_watchdog_task(struct work_struct *work)
2581 struct igb_adapter *adapter = container_of(work,
2582 struct igb_adapter, watchdog_task);
2583 struct e1000_hw *hw = &adapter->hw;
2584 struct net_device *netdev = adapter->netdev;
2585 struct igb_ring *tx_ring = adapter->tx_ring;
2590 link = igb_has_link(adapter);
2591 if (netif_carrier_ok(netdev) && link)
2595 if (!netif_carrier_ok(netdev)) {
2597 hw->mac.ops.get_speed_and_duplex(&adapter->hw,
2598 &adapter->link_speed,
2599 &adapter->link_duplex);
2601 ctrl = rd32(E1000_CTRL);
2602 /* Links status message must follow this format */
2603 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
2604 "Flow Control: %s\n",
2606 adapter->link_speed,
2607 adapter->link_duplex == FULL_DUPLEX ?
2608 "Full Duplex" : "Half Duplex",
2609 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2610 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2611 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2612 E1000_CTRL_TFCE) ? "TX" : "None")));
2614 /* tweak tx_queue_len according to speed/duplex and
2615 * adjust the timeout factor */
2616 netdev->tx_queue_len = adapter->tx_queue_len;
2617 adapter->tx_timeout_factor = 1;
2618 switch (adapter->link_speed) {
2620 netdev->tx_queue_len = 10;
2621 adapter->tx_timeout_factor = 14;
2624 netdev->tx_queue_len = 100;
2625 /* maybe add some timeout factor ? */
2629 netif_carrier_on(netdev);
2630 netif_tx_wake_all_queues(netdev);
2632 igb_ping_all_vfs(adapter);
2634 /* link state has changed, schedule phy info update */
2635 if (!test_bit(__IGB_DOWN, &adapter->state))
2636 mod_timer(&adapter->phy_info_timer,
2637 round_jiffies(jiffies + 2 * HZ));
2640 if (netif_carrier_ok(netdev)) {
2641 adapter->link_speed = 0;
2642 adapter->link_duplex = 0;
2643 /* Links status message must follow this format */
2644 printk(KERN_INFO "igb: %s NIC Link is Down\n",
2646 netif_carrier_off(netdev);
2647 netif_tx_stop_all_queues(netdev);
2649 igb_ping_all_vfs(adapter);
2651 /* link state has changed, schedule phy info update */
2652 if (!test_bit(__IGB_DOWN, &adapter->state))
2653 mod_timer(&adapter->phy_info_timer,
2654 round_jiffies(jiffies + 2 * HZ));
2659 igb_update_stats(adapter);
2661 hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2662 adapter->tpt_old = adapter->stats.tpt;
2663 hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old;
2664 adapter->colc_old = adapter->stats.colc;
2666 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2667 adapter->gorc_old = adapter->stats.gorc;
2668 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2669 adapter->gotc_old = adapter->stats.gotc;
2671 igb_update_adaptive(&adapter->hw);
2673 if (!netif_carrier_ok(netdev)) {
2674 if (IGB_DESC_UNUSED(tx_ring) + 1 < tx_ring->count) {
2675 /* We've lost link, so the controller stops DMA,
2676 * but we've got queued Tx work that's never going
2677 * to get done, so reset controller to flush Tx.
2678 * (Do the reset outside of interrupt context). */
2679 adapter->tx_timeout_count++;
2680 schedule_work(&adapter->reset_task);
2684 /* Cause software interrupt to ensure rx ring is cleaned */
2685 if (adapter->msix_entries) {
2686 for (i = 0; i < adapter->num_rx_queues; i++)
2687 eics |= adapter->rx_ring[i].eims_value;
2688 wr32(E1000_EICS, eics);
2690 wr32(E1000_ICS, E1000_ICS_RXDMT0);
2693 /* Force detection of hung controller every watchdog period */
2694 tx_ring->detect_tx_hung = true;
2696 /* Reset the timer */
2697 if (!test_bit(__IGB_DOWN, &adapter->state))
2698 mod_timer(&adapter->watchdog_timer,
2699 round_jiffies(jiffies + 2 * HZ));
2702 enum latency_range {
2706 latency_invalid = 255
2711 * igb_update_ring_itr - update the dynamic ITR value based on packet size
2713 * Stores a new ITR value based strictly on packet size. This
2714 * algorithm is less sophisticated than that used in igb_update_itr,
2715 * due to the difficulty of synchronizing statistics across multiple
2716 * receive rings. The divisors and thresholds used by this function
2717 * were determined based on theoretical maximum wire speed and testing
2718 * data, in order to minimize response time while increasing bulk
2720 * This functionality is controlled by the InterruptThrottleRate module
2721 * parameter (see igb_param.c)
2722 * NOTE: This function is called only when operating in a multiqueue
2723 * receive environment.
2724 * @rx_ring: pointer to ring
2726 static void igb_update_ring_itr(struct igb_ring *rx_ring)
2728 int new_val = rx_ring->itr_val;
2729 int avg_wire_size = 0;
2730 struct igb_adapter *adapter = rx_ring->adapter;
2732 if (!rx_ring->total_packets)
2733 goto clear_counts; /* no packets, so don't do anything */
2735 /* For non-gigabit speeds, just fix the interrupt rate at 4000
2736 * ints/sec - ITR timer value of 120 ticks.
2738 if (adapter->link_speed != SPEED_1000) {
2742 avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets;
2744 /* Add 24 bytes to size to account for CRC, preamble, and gap */
2745 avg_wire_size += 24;
2747 /* Don't starve jumbo frames */
2748 avg_wire_size = min(avg_wire_size, 3000);
2750 /* Give a little boost to mid-size frames */
2751 if ((avg_wire_size > 300) && (avg_wire_size < 1200))
2752 new_val = avg_wire_size / 3;
2754 new_val = avg_wire_size / 2;
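/* e.g. a stream of full-sized 1500 byte frames averages 1524 bytes with
 * overhead, which falls outside the 300-1200 boost window, so
 * new_val = 1524 / 2 = 762 */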
2757 if (new_val != rx_ring->itr_val) {
2758 rx_ring->itr_val = new_val;
2759 rx_ring->set_itr = 1;
2762 rx_ring->total_bytes = 0;
2763 rx_ring->total_packets = 0;
2767 * igb_update_itr - update the dynamic ITR value based on statistics
2768 * Stores a new ITR value based on packets and byte
2769 * counts during the last interrupt. The advantage of per interrupt
2770 * computation is faster updates and more accurate ITR for the current
2771 * traffic pattern. Constants in this function were computed
2772 * based on theoretical maximum wire speed and thresholds were set based
2773 * on testing data as well as attempting to minimize response time
2774 * while increasing bulk throughput.
2775 * This functionality is controlled by the InterruptThrottleRate module
2776 * parameter (see igb_param.c)
2777 * NOTE: These calculations are only valid when operating in a single-
2778 * queue environment.
2779 * @adapter: pointer to adapter
2780 * @itr_setting: current adapter->itr
2781 * @packets: the number of packets during this measurement interval
2782 * @bytes: the number of bytes during this measurement interval
2784 static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
2785 int packets, int bytes)
2787 unsigned int retval = itr_setting;
2790 goto update_itr_done;
2792 switch (itr_setting) {
2793 case lowest_latency:
2794 /* handle TSO and jumbo frames */
2795 if (bytes/packets > 8000)
2796 retval = bulk_latency;
2797 else if ((packets < 5) && (bytes > 512))
2798 retval = low_latency;
2800 case low_latency: /* 50 usec aka 20000 ints/s */
2801 if (bytes > 10000) {
2802 /* this if handles the TSO accounting */
2803 if (bytes/packets > 8000) {
2804 retval = bulk_latency;
2805 } else if ((packets < 10) || ((bytes/packets) > 1200)) {
2806 retval = bulk_latency;
2807 } else if (packets > 35) {
2808 retval = lowest_latency;
2810 } else if (bytes/packets > 2000) {
2811 retval = bulk_latency;
2812 } else if (packets <= 2 && bytes < 512) {
2813 retval = lowest_latency;
2816 case bulk_latency: /* 250 usec aka 4000 ints/s */
2817 if (bytes > 25000) {
2819 retval = low_latency;
2820 } else if (bytes < 1500) {
2821 retval = low_latency;
2830 static void igb_set_itr(struct igb_adapter *adapter)
2833 u32 new_itr = adapter->itr;
2835 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2836 if (adapter->link_speed != SPEED_1000) {
2842 adapter->rx_itr = igb_update_itr(adapter,
2844 adapter->rx_ring->total_packets,
2845 adapter->rx_ring->total_bytes);
2847 if (adapter->rx_ring->buddy) {
2848 adapter->tx_itr = igb_update_itr(adapter,
2850 adapter->tx_ring->total_packets,
2851 adapter->tx_ring->total_bytes);
2852 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2854 current_itr = adapter->rx_itr;
2857 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2858 if (adapter->itr_setting == 3 && current_itr == lowest_latency)
2859 current_itr = low_latency;
2861 switch (current_itr) {
2862 /* counts and packets in update_itr are dependent on these numbers */
2863 case lowest_latency:
2867 new_itr = 20000; /* aka hwitr = ~200 */
2877 adapter->rx_ring->total_bytes = 0;
2878 adapter->rx_ring->total_packets = 0;
2879 if (adapter->rx_ring->buddy) {
2880 adapter->rx_ring->buddy->total_bytes = 0;
2881 adapter->rx_ring->buddy->total_packets = 0;
2884 if (new_itr != adapter->itr) {
2885 /* this attempts to bias the interrupt rate towards Bulk
2886 * by adding intermediate steps when interrupt rate is
2888 new_itr = new_itr > adapter->itr ?
2889 min(adapter->itr + (new_itr >> 2), new_itr) :
2891 /* Don't write the value here; it resets the adapter's
2892 * internal timer, and causes us to delay far longer than
2893 * we should between interrupts. Instead, we write the ITR
2894 * value at the beginning of the next interrupt so the timing
2895 * ends up being correct.
2897 adapter->itr = new_itr;
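/* convert the interrupts/sec rate into the ITR register's interval
 * unit of 256ns: 10^9 ns per second divided by (rate * 256) */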
2898 adapter->rx_ring->itr_val = 1000000000 / (new_itr * 256);
2899 adapter->rx_ring->set_itr = 1;
2906 #define IGB_TX_FLAGS_CSUM 0x00000001
2907 #define IGB_TX_FLAGS_VLAN 0x00000002
2908 #define IGB_TX_FLAGS_TSO 0x00000004
2909 #define IGB_TX_FLAGS_IPV4 0x00000008
2910 #define IGB_TX_FLAGS_TSTAMP 0x00000010
2911 #define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
2912 #define IGB_TX_FLAGS_VLAN_SHIFT 16
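/* the low bits of tx_flags carry feature flags; the upper 16 bits hold
 * the 802.1Q tag, shifted in by IGB_TX_FLAGS_VLAN_SHIFT */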
2914 static inline int igb_tso_adv(struct igb_adapter *adapter,
2915 struct igb_ring *tx_ring,
2916 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2918 struct e1000_adv_tx_context_desc *context_desc;
2921 struct igb_buffer *buffer_info;
2922 u32 info = 0, tu_cmd = 0;
2923 u32 mss_l4len_idx, l4len;
2926 if (skb_header_cloned(skb)) {
2927 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2932 l4len = tcp_hdrlen(skb);
2935 if (skb->protocol == htons(ETH_P_IP)) {
2936 struct iphdr *iph = ip_hdr(skb);
2939 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2943 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
2944 ipv6_hdr(skb)->payload_len = 0;
2945 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2946 &ipv6_hdr(skb)->daddr,
2950 i = tx_ring->next_to_use;
2952 buffer_info = &tx_ring->buffer_info[i];
2953 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
2954 /* VLAN MACLEN IPLEN */
2955 if (tx_flags & IGB_TX_FLAGS_VLAN)
2956 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
2957 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
2958 *hdr_len += skb_network_offset(skb);
2959 info |= skb_network_header_len(skb);
2960 *hdr_len += skb_network_header_len(skb);
2961 context_desc->vlan_macip_lens = cpu_to_le32(info);
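/* vlan_macip_lens packs the VLAN tag (bits 31:16), MAC header length
 * (bits 15:9) and IP header length (bits 8:0) */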
2963 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2964 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
2966 if (skb->protocol == htons(ETH_P_IP))
2967 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
2968 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
2970 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
2973 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
2974 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
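/* mss_l4len_idx packs the MSS (bits 31:16) and the L4 (TCP) header
 * length (bits 15:8); bits 7:4 hold the context index when one is needed */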
2976 /* For 82575, context index must be unique per ring. */
2977 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
2978 mss_l4len_idx |= tx_ring->queue_index << 4;
2980 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2981 context_desc->seqnum_seed = 0;
2983 buffer_info->time_stamp = jiffies;
2984 buffer_info->next_to_watch = i;
2985 buffer_info->dma = 0;
2987 if (i == tx_ring->count)
2990 tx_ring->next_to_use = i;
2995 static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
2996 struct igb_ring *tx_ring,
2997 struct sk_buff *skb, u32 tx_flags)
2999 struct e1000_adv_tx_context_desc *context_desc;
3001 struct igb_buffer *buffer_info;
3002 u32 info = 0, tu_cmd = 0;
3004 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3005 (tx_flags & IGB_TX_FLAGS_VLAN)) {
3006 i = tx_ring->next_to_use;
3007 buffer_info = &tx_ring->buffer_info[i];
3008 context_desc = E1000_TX_CTXTDESC_ADV(*tx_ring, i);
3010 if (tx_flags & IGB_TX_FLAGS_VLAN)
3011 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3012 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3013 if (skb->ip_summed == CHECKSUM_PARTIAL)
3014 info |= skb_network_header_len(skb);
3016 context_desc->vlan_macip_lens = cpu_to_le32(info);
3018 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
3020 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3021 switch (skb->protocol) {
3022 case cpu_to_be16(ETH_P_IP):
3023 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
3024 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
3025 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3027 case cpu_to_be16(ETH_P_IPV6):
3028 /* XXX what about other V6 headers?? */
3029 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3030 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
3033 if (unlikely(net_ratelimit()))
3034 dev_warn(&adapter->pdev->dev,
3035 "partial checksum but proto=%x!\n",
3041 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3042 context_desc->seqnum_seed = 0;
3043 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX)
3044 context_desc->mss_l4len_idx =
3045 cpu_to_le32(tx_ring->queue_index << 4);
3047 context_desc->mss_l4len_idx = 0;
3049 buffer_info->time_stamp = jiffies;
3050 buffer_info->next_to_watch = i;
3051 buffer_info->dma = 0;
3054 if (i == tx_ring->count)
3056 tx_ring->next_to_use = i;
3063 #define IGB_MAX_TXD_PWR 16
3064 #define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
3066 static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3067 struct igb_ring *tx_ring, struct sk_buff *skb,
3070 struct igb_buffer *buffer_info;
3071 unsigned int len = skb_headlen(skb);
3072 unsigned int count = 0, i;
3075 i = tx_ring->next_to_use;
3077 buffer_info = &tx_ring->buffer_info[i];
3078 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3079 buffer_info->length = len;
3080 /* set time_stamp *before* dma to help avoid a possible race */
3081 buffer_info->time_stamp = jiffies;
3082 buffer_info->next_to_watch = i;
3083 buffer_info->dma = pci_map_single(adapter->pdev, skb->data, len,
3087 if (i == tx_ring->count)
3090 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3091 struct skb_frag_struct *frag;
3093 frag = &skb_shinfo(skb)->frags[f];
3096 buffer_info = &tx_ring->buffer_info[i];
3097 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3098 buffer_info->length = len;
3099 buffer_info->time_stamp = jiffies;
3100 buffer_info->next_to_watch = i;
3101 buffer_info->dma = pci_map_page(adapter->pdev,
3109 if (i == tx_ring->count)
3113 i = ((i == 0) ? tx_ring->count - 1 : i - 1);
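/* i now points at the last descriptor of this packet: park the skb there
 * and record that index in the first buffer's next_to_watch so
 * igb_clean_tx_irq can tell when the whole packet has completed */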
3114 tx_ring->buffer_info[i].skb = skb;
3115 tx_ring->buffer_info[first].next_to_watch = i;
3120 static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3121 struct igb_ring *tx_ring,
3122 int tx_flags, int count, u32 paylen,
3125 union e1000_adv_tx_desc *tx_desc = NULL;
3126 struct igb_buffer *buffer_info;
3127 u32 olinfo_status = 0, cmd_type_len;
3130 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3131 E1000_ADVTXD_DCMD_DEXT);
3133 if (tx_flags & IGB_TX_FLAGS_VLAN)
3134 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
3136 if (tx_flags & IGB_TX_FLAGS_TSTAMP)
3137 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
3139 if (tx_flags & IGB_TX_FLAGS_TSO) {
3140 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
3142 /* insert tcp checksum */
3143 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3145 /* insert ip checksum */
3146 if (tx_flags & IGB_TX_FLAGS_IPV4)
3147 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
3149 } else if (tx_flags & IGB_TX_FLAGS_CSUM) {
3150 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3153 if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) &&
3154 (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO |
3155 IGB_TX_FLAGS_VLAN)))
3156 olinfo_status |= tx_ring->queue_index << 4;
3158 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
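/* PAYLEN is the payload length: the full frame minus the TSO header
 * bytes (hdr_len is zero for non-TSO sends) */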
3160 i = tx_ring->next_to_use;
3162 buffer_info = &tx_ring->buffer_info[i];
3163 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3164 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3165 tx_desc->read.cmd_type_len =
3166 cpu_to_le32(cmd_type_len | buffer_info->length);
3167 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3169 if (i == tx_ring->count)
3173 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
3174 /* Force memory writes to complete before letting h/w
3175 * know there are new descriptors to fetch. (Only
3176 * applicable for weak-ordered memory model archs,
3177 * such as IA-64). */
3180 tx_ring->next_to_use = i;
3181 writel(i, adapter->hw.hw_addr + tx_ring->tail);
3182 /* we need this if more than one processor can write to our tail
3183 * at a time; it synchronizes IO on IA64/Altix systems */
3187 static int __igb_maybe_stop_tx(struct net_device *netdev,
3188 struct igb_ring *tx_ring, int size)
3190 struct igb_adapter *adapter = netdev_priv(netdev);
3192 netif_stop_subqueue(netdev, tx_ring->queue_index);
3194 /* Herbert's original patch had:
3195 * smp_mb__after_netif_stop_queue();
3196 * but since that doesn't exist yet, just open code it. */
3199 /* We need to check again in a case another CPU has just
3200 * made room available. */
3201 if (IGB_DESC_UNUSED(tx_ring) < size)
3205 netif_wake_subqueue(netdev, tx_ring->queue_index);
3206 ++adapter->restart_queue;
3210 static int igb_maybe_stop_tx(struct net_device *netdev,
3211 struct igb_ring *tx_ring, int size)
3213 if (IGB_DESC_UNUSED(tx_ring) >= size)
3215 return __igb_maybe_stop_tx(netdev, tx_ring, size);
3218 static int igb_xmit_frame_ring_adv(struct sk_buff *skb,
3219 struct net_device *netdev,
3220 struct igb_ring *tx_ring)
3222 struct igb_adapter *adapter = netdev_priv(netdev);
3224 unsigned int tx_flags = 0;
3227 union skb_shared_tx *shtx;
3229 if (test_bit(__IGB_DOWN, &adapter->state)) {
3230 dev_kfree_skb_any(skb);
3231 return NETDEV_TX_OK;
3234 if (skb->len <= 0) {
3235 dev_kfree_skb_any(skb);
3236 return NETDEV_TX_OK;
3239 /* need: 1 descriptor per page,
3240 * + 2 desc gap to keep tail from touching head,
3241 * + 1 desc for skb->data,
3242 * + 1 desc for context descriptor,
3243 * otherwise try next time */
3244 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
3245 /* this is a hard error */
3246 return NETDEV_TX_BUSY;
3250 * TODO: check that there currently is no other packet with
3251 * time stamping in the queue
3253 * When doing time stamping, keep the connection to the socket
3254 * a while longer: it is still needed by skb_hwtstamp_tx(),
3255 * called either in igb_tx_hwtstamp() or by our caller when
3256 * doing software time stamping.
3259 if (unlikely(shtx->hardware)) {
3260 shtx->in_progress = 1;
3261 tx_flags |= IGB_TX_FLAGS_TSTAMP;
3264 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
3265 tx_flags |= IGB_TX_FLAGS_VLAN;
3266 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3269 if (skb->protocol == htons(ETH_P_IP))
3270 tx_flags |= IGB_TX_FLAGS_IPV4;
3272 first = tx_ring->next_to_use;
3273 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags,
3277 dev_kfree_skb_any(skb);
3278 return NETDEV_TX_OK;
3282 tx_flags |= IGB_TX_FLAGS_TSO;
3283 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) &&
3284 (skb->ip_summed == CHECKSUM_PARTIAL))
3285 tx_flags |= IGB_TX_FLAGS_CSUM;
3287 igb_tx_queue_adv(adapter, tx_ring, tx_flags,
3288 igb_tx_map_adv(adapter, tx_ring, skb, first),
3291 netdev->trans_start = jiffies;
3293 /* Make sure there is space in the ring for the next send. */
3294 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
3296 return NETDEV_TX_OK;
3299 static int igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *netdev)
3301 struct igb_adapter *adapter = netdev_priv(netdev);
3302 struct igb_ring *tx_ring;
3305 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
3306 tx_ring = adapter->multi_tx_table[r_idx];
3308 /* This goes back to the question of how to logically map a tx queue
3309 * to a flow. Right now, performance is impacted slightly negatively
3310 * if using multiple tx queues. If the stack breaks away from a
3311 * single qdisc implementation, we can look at this again. */
3312 return (igb_xmit_frame_ring_adv(skb, netdev, tx_ring));
3316 * igb_tx_timeout - Respond to a Tx Hang
3317 * @netdev: network interface device structure
3319 static void igb_tx_timeout(struct net_device *netdev)
3321 struct igb_adapter *adapter = netdev_priv(netdev);
3322 struct e1000_hw *hw = &adapter->hw;
3324 /* Do the reset outside of interrupt context */
3325 adapter->tx_timeout_count++;
3326 schedule_work(&adapter->reset_task);
3328 (adapter->eims_enable_mask & ~adapter->eims_other));
3331 static void igb_reset_task(struct work_struct *work)
3333 struct igb_adapter *adapter;
3334 adapter = container_of(work, struct igb_adapter, reset_task);
3336 igb_reinit_locked(adapter);
3340 * igb_get_stats - Get System Network Statistics
3341 * @netdev: network interface device structure
3343 * Returns the address of the device statistics structure.
3344 * The statistics are actually updated from the timer callback.
3346 static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3348 struct igb_adapter *adapter = netdev_priv(netdev);
3350 /* only return the current stats */
3351 return &adapter->net_stats;
3355 * igb_change_mtu - Change the Maximum Transfer Unit
3356 * @netdev: network interface device structure
3357 * @new_mtu: new value for maximum frame size
3359 * Returns 0 on success, negative on failure
3361 static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3363 struct igb_adapter *adapter = netdev_priv(netdev);
3364 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3366 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
3367 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3368 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3372 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3373 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
3377 while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
3380 /* igb_down has a dependency on max_frame_size */
3381 adapter->max_frame_size = max_frame;
3382 if (netif_running(netdev))
3385 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3386 * means we reserve 2 more, this pushes us to allocate from the next
3388 * i.e. RXBUFFER_2048 --> size-4096 slab
3391 if (max_frame <= IGB_RXBUFFER_256)
3392 adapter->rx_buffer_len = IGB_RXBUFFER_256;
3393 else if (max_frame <= IGB_RXBUFFER_512)
3394 adapter->rx_buffer_len = IGB_RXBUFFER_512;
3395 else if (max_frame <= IGB_RXBUFFER_1024)
3396 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3397 else if (max_frame <= IGB_RXBUFFER_2048)
3398 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
3400 #if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
3401 adapter->rx_buffer_len = IGB_RXBUFFER_16384;
3403 adapter->rx_buffer_len = PAGE_SIZE / 2;
3406 /* if sr-iov is enabled we need to force buffer size to 1K or larger */
3407 if (adapter->vfs_allocated_count &&
3408 (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
3409 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3411 /* adjust allocation if LPE protects us, and we aren't using SBP */
3412 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
3413 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
3414 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3416 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
3417 netdev->mtu, new_mtu);
3418 netdev->mtu = new_mtu;
3420 if (netif_running(netdev))
3425 clear_bit(__IGB_RESETTING, &adapter->state);
3431 * igb_update_stats - Update the board statistics counters
3432 * @adapter: board private structure
3435 void igb_update_stats(struct igb_adapter *adapter)
3437 struct e1000_hw *hw = &adapter->hw;
3438 struct pci_dev *pdev = adapter->pdev;
3441 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3444 * Prevent stats update while adapter is being reset, or if the pci
3445 * connection is down.
3447 if (adapter->link_speed == 0)
3449 if (pci_channel_offline(pdev))
3452 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3453 adapter->stats.gprc += rd32(E1000_GPRC);
3454 adapter->stats.gorc += rd32(E1000_GORCL);
3455 rd32(E1000_GORCH); /* clear GORCL */
3456 adapter->stats.bprc += rd32(E1000_BPRC);
3457 adapter->stats.mprc += rd32(E1000_MPRC);
3458 adapter->stats.roc += rd32(E1000_ROC);
3460 adapter->stats.prc64 += rd32(E1000_PRC64);
3461 adapter->stats.prc127 += rd32(E1000_PRC127);
3462 adapter->stats.prc255 += rd32(E1000_PRC255);
3463 adapter->stats.prc511 += rd32(E1000_PRC511);
3464 adapter->stats.prc1023 += rd32(E1000_PRC1023);
3465 adapter->stats.prc1522 += rd32(E1000_PRC1522);
3466 adapter->stats.symerrs += rd32(E1000_SYMERRS);
3467 adapter->stats.sec += rd32(E1000_SEC);
3469 adapter->stats.mpc += rd32(E1000_MPC);
3470 adapter->stats.scc += rd32(E1000_SCC);
3471 adapter->stats.ecol += rd32(E1000_ECOL);
3472 adapter->stats.mcc += rd32(E1000_MCC);
3473 adapter->stats.latecol += rd32(E1000_LATECOL);
3474 adapter->stats.dc += rd32(E1000_DC);
3475 adapter->stats.rlec += rd32(E1000_RLEC);
3476 adapter->stats.xonrxc += rd32(E1000_XONRXC);
3477 adapter->stats.xontxc += rd32(E1000_XONTXC);
3478 adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
3479 adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
3480 adapter->stats.fcruc += rd32(E1000_FCRUC);
3481 adapter->stats.gptc += rd32(E1000_GPTC);
3482 adapter->stats.gotc += rd32(E1000_GOTCL);
3483 rd32(E1000_GOTCH); /* clear GOTCL */
3484 adapter->stats.rnbc += rd32(E1000_RNBC);
3485 adapter->stats.ruc += rd32(E1000_RUC);
3486 adapter->stats.rfc += rd32(E1000_RFC);
3487 adapter->stats.rjc += rd32(E1000_RJC);
3488 adapter->stats.tor += rd32(E1000_TORH);
3489 adapter->stats.tot += rd32(E1000_TOTH);
3490 adapter->stats.tpr += rd32(E1000_TPR);
3492 adapter->stats.ptc64 += rd32(E1000_PTC64);
3493 adapter->stats.ptc127 += rd32(E1000_PTC127);
3494 adapter->stats.ptc255 += rd32(E1000_PTC255);
3495 adapter->stats.ptc511 += rd32(E1000_PTC511);
3496 adapter->stats.ptc1023 += rd32(E1000_PTC1023);
3497 adapter->stats.ptc1522 += rd32(E1000_PTC1522);
3499 adapter->stats.mptc += rd32(E1000_MPTC);
3500 adapter->stats.bptc += rd32(E1000_BPTC);
3502 /* used for adaptive IFS */
3504 hw->mac.tx_packet_delta = rd32(E1000_TPT);
3505 adapter->stats.tpt += hw->mac.tx_packet_delta;
3506 hw->mac.collision_delta = rd32(E1000_COLC);
3507 adapter->stats.colc += hw->mac.collision_delta;
3509 adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
3510 adapter->stats.rxerrc += rd32(E1000_RXERRC);
3511 adapter->stats.tncrs += rd32(E1000_TNCRS);
3512 adapter->stats.tsctc += rd32(E1000_TSCTC);
3513 adapter->stats.tsctfc += rd32(E1000_TSCTFC);
3515 adapter->stats.iac += rd32(E1000_IAC);
3516 adapter->stats.icrxoc += rd32(E1000_ICRXOC);
3517 adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
3518 adapter->stats.icrxatc += rd32(E1000_ICRXATC);
3519 adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
3520 adapter->stats.ictxatc += rd32(E1000_ICTXATC);
3521 adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
3522 adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
3523 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3525 /* Fill out the OS statistics structure */
3526 adapter->net_stats.multicast = adapter->stats.mprc;
3527 adapter->net_stats.collisions = adapter->stats.colc;
3531 /* RLEC on some newer hardware can be incorrect so build
3532 * our own version based on RUC and ROC */
3533 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3534 adapter->stats.crcerrs + adapter->stats.algnerrc +
3535 adapter->stats.ruc + adapter->stats.roc +
3536 adapter->stats.cexterr;
3537 adapter->net_stats.rx_length_errors = adapter->stats.ruc +
3539 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3540 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3541 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3544 adapter->net_stats.tx_errors = adapter->stats.ecol +
3545 adapter->stats.latecol;
3546 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
3547 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
3548 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
3550 /* Tx Dropped needs to be maintained elsewhere */
3553 if (hw->phy.media_type == e1000_media_type_copper) {
3554 if ((adapter->link_speed == SPEED_1000) &&
3555 (!igb_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3556 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3557 adapter->phy_stats.idle_errors += phy_tmp;
3561 /* Management Stats */
3562 adapter->stats.mgptc += rd32(E1000_MGTPTC);
3563 adapter->stats.mgprc += rd32(E1000_MGTPRC);
3564 adapter->stats.mgpdc += rd32(E1000_MGTPDC);
3567 static irqreturn_t igb_msix_other(int irq, void *data)
3569 struct net_device *netdev = data;
3570 struct igb_adapter *adapter = netdev_priv(netdev);
3571 struct e1000_hw *hw = &adapter->hw;
3572 u32 icr = rd32(E1000_ICR);
3574 /* reading ICR causes bit 31 of EICR to be cleared */
3576 if (icr & E1000_ICR_DOUTSYNC) {
3577 /* HW is reporting DMA is out of sync */
3578 adapter->stats.doosync++;
3581 /* Check for a mailbox event */
3582 if (icr & E1000_ICR_VMMB)
3583 igb_msg_task(adapter);
3585 if (icr & E1000_ICR_LSC) {
3586 hw->mac.get_link_status = 1;
3587 /* guard against interrupt when we're going down */
3588 if (!test_bit(__IGB_DOWN, &adapter->state))
3589 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3592 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB);
3593 wr32(E1000_EIMS, adapter->eims_other);
3598 static irqreturn_t igb_msix_tx(int irq, void *data)
3600 struct igb_ring *tx_ring = data;
3601 struct igb_adapter *adapter = tx_ring->adapter;
3602 struct e1000_hw *hw = &adapter->hw;
3604 #ifdef CONFIG_IGB_DCA
3605 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3606 igb_update_tx_dca(tx_ring);
3609 tx_ring->total_bytes = 0;
3610 tx_ring->total_packets = 0;
3612 /* auto mask will automatically reenable the interrupt when we write
3614 if (!igb_clean_tx_irq(tx_ring))
3615 /* Ring was not completely cleaned, so fire another interrupt */
3616 wr32(E1000_EICS, tx_ring->eims_value);
3618 wr32(E1000_EIMS, tx_ring->eims_value);
3623 static void igb_write_itr(struct igb_ring *ring)
3625 struct e1000_hw *hw = &ring->adapter->hw;
3626 if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
3627 switch (hw->mac.type) {
3629 wr32(ring->itr_register, ring->itr_val |
3633 wr32(ring->itr_register, ring->itr_val |
3634 (ring->itr_val << 16));
3641 static irqreturn_t igb_msix_rx(int irq, void *data)
3643 struct igb_ring *rx_ring = data;
3645 /* Write the ITR value calculated at the end of the
3646 * previous interrupt.
3649 igb_write_itr(rx_ring);
3651 if (napi_schedule_prep(&rx_ring->napi))
3652 __napi_schedule(&rx_ring->napi);
3654 #ifdef CONFIG_IGB_DCA
3655 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
3656 igb_update_rx_dca(rx_ring);
3661 #ifdef CONFIG_IGB_DCA
3662 static void igb_update_rx_dca(struct igb_ring *rx_ring)
3665 struct igb_adapter *adapter = rx_ring->adapter;
3666 struct e1000_hw *hw = &adapter->hw;
3667 int cpu = get_cpu();
3668 int q = rx_ring->reg_idx;
3670 if (rx_ring->cpu != cpu) {
3671 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
3672 if (hw->mac.type == e1000_82576) {
3673 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
3674 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
3675 E1000_DCA_RXCTRL_CPUID_SHIFT;
3677 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
3678 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3680 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
3681 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
3682 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
3683 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
3689 static void igb_update_tx_dca(struct igb_ring *tx_ring)
3692 struct igb_adapter *adapter = tx_ring->adapter;
3693 struct e1000_hw *hw = &adapter->hw;
3694 int cpu = get_cpu();
3695 int q = tx_ring->reg_idx;
3697 if (tx_ring->cpu != cpu) {
3698 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
3699 if (hw->mac.type == e1000_82576) {
3700 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
3701 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
3702 E1000_DCA_TXCTRL_CPUID_SHIFT;
3704 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
3705 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3707 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
3708 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
3714 static void igb_setup_dca(struct igb_adapter *adapter)
3718 if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
3721 for (i = 0; i < adapter->num_tx_queues; i++) {
3722 adapter->tx_ring[i].cpu = -1;
3723 igb_update_tx_dca(&adapter->tx_ring[i]);
3725 for (i = 0; i < adapter->num_rx_queues; i++) {
3726 adapter->rx_ring[i].cpu = -1;
3727 igb_update_rx_dca(&adapter->rx_ring[i]);
3731 static int __igb_notify_dca(struct device *dev, void *data)
3733 struct net_device *netdev = dev_get_drvdata(dev);
3734 struct igb_adapter *adapter = netdev_priv(netdev);
3735 struct e1000_hw *hw = &adapter->hw;
3736 unsigned long event = *(unsigned long *)data;
3739 case DCA_PROVIDER_ADD:
3740 /* if already enabled, don't do it again */
3741 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3743 /* Always use CB2 mode, difference is masked
3744 * in the CB driver. */
3745 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
3746 if (dca_add_requester(dev) == 0) {
3747 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3748 dev_info(&adapter->pdev->dev, "DCA enabled\n");
3749 igb_setup_dca(adapter);
3752 /* Fall Through since DCA is disabled. */
3753 case DCA_PROVIDER_REMOVE:
3754 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3755 /* without this a class_device is left
3756 * hanging around in the sysfs model */
3757 dca_remove_requester(dev);
3758 dev_info(&adapter->pdev->dev, "DCA disabled\n");
3759 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3760 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3768 static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
3773 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
3776 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
3778 #endif /* CONFIG_IGB_DCA */
3780 static void igb_ping_all_vfs(struct igb_adapter *adapter)
3782 struct e1000_hw *hw = &adapter->hw;
3786 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
3787 ping = E1000_PF_CONTROL_MSG;
3788 if (adapter->vf_data[i].clear_to_send)
3789 ping |= E1000_VT_MSGTYPE_CTS;
3790 igb_write_mbx(hw, &ping, 1, i);
3794 static int igb_set_vf_multicasts(struct igb_adapter *adapter,
3795 u32 *msgbuf, u32 vf)
3797 int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
3798 u16 *hash_list = (u16 *)&msgbuf[1];
3799 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
3802 /* only up to 30 hash values supported */
3806 /* salt away the number of multicast addresses assigned
3807 * to this VF for later use to restore when the PF multicast
3810 vf_data->num_vf_mc_hashes = n;
3812 /* VFs are limited to using the MTA hash table for their multicast
3814 for (i = 0; i < n; i++)
3815 vf_data->vf_mc_hashes[i] = hash_list[i];
3817 /* Flush and reset the mta with the new values */
3818 igb_set_multi(adapter->netdev);
3823 static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
3825 struct e1000_hw *hw = &adapter->hw;
3826 struct vf_data_storage *vf_data;
3829 for (i = 0; i < adapter->vfs_allocated_count; i++) {
3830 vf_data = &adapter->vf_data[i];
3831 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
3832 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
3836 static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
3838 struct e1000_hw *hw = &adapter->hw;
3839 u32 pool_mask, reg, vid;
3842 pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
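/* each VLVF filter entry carries a pool-select bitmap; bit
 * (POOLSEL_SHIFT + n) marks pool n (VF n, or the PF at index
 * vfs_allocated_count) as a member of that VLAN */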
3844 /* Find the vlan filter for this id */
3845 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
3846 reg = rd32(E1000_VLVF(i));
3848 /* remove the vf from the pool */
3851 /* if pool is empty then remove entry from vfta */
3852 if (!(reg & E1000_VLVF_POOLSEL_MASK) &&
3853 (reg & E1000_VLVF_VLANID_ENABLE)) {
3855 vid = reg & E1000_VLVF_VLANID_MASK;
3856 igb_vfta_set(hw, vid, false);
3859 wr32(E1000_VLVF(i), reg);
3863 static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
3865 struct e1000_hw *hw = &adapter->hw;
3868 /* It is an error to call this function when VFs are not enabled */
3869 if (!adapter->vfs_allocated_count)
3872 /* Find the vlan filter for this id */
3873 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
3874 reg = rd32(E1000_VLVF(i));
3875 if ((reg & E1000_VLVF_VLANID_ENABLE) &&
3876 vid == (reg & E1000_VLVF_VLANID_MASK))
3881 if (i == E1000_VLVF_ARRAY_SIZE) {
3882 /* Did not find a matching VLAN ID entry that was
3883 * enabled. Search for a free filter entry, i.e.
3884 * one without the enable bit set
3886 for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) {
3887 reg = rd32(E1000_VLVF(i));
3888 if (!(reg & E1000_VLVF_VLANID_ENABLE))
3892 if (i < E1000_VLVF_ARRAY_SIZE) {
3893 /* Found an enabled/available entry */
3894 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
3896 /* if !enabled we need to set this up in vfta */
3897 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
3898 /* add VID to filter table, if bit already set
3899 * PF must have added it outside of table */
3900 if (igb_vfta_set(hw, vid, true))
3901 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
3902 adapter->vfs_allocated_count);
3903 reg |= E1000_VLVF_VLANID_ENABLE;
3905 reg &= ~E1000_VLVF_VLANID_MASK;
3908 wr32(E1000_VLVF(i), reg);
3912 if (i < E1000_VLVF_ARRAY_SIZE) {
3913 /* remove vf from the pool */
3914 reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf));
3915 /* if pool is empty then remove entry from vfta */
3916 if (!(reg & E1000_VLVF_POOLSEL_MASK)) {
3918 igb_vfta_set(hw, vid, false);
3920 wr32(E1000_VLVF(i), reg);
3927 static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
3929 int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
3930 int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
3932 return igb_vlvf_set(adapter, vid, add, vf);
3935 static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
3937 struct e1000_hw *hw = &adapter->hw;
3939 /* disable mailbox functionality for vf */
3940 adapter->vf_data[vf].clear_to_send = false;
3942 /* reset offloads to defaults */
3943 igb_set_vmolr(hw, vf);
3945 /* reset vlans for device */
3946 igb_clear_vf_vfta(adapter, vf);
3948 /* reset multicast table array for vf */
3949 adapter->vf_data[vf].num_vf_mc_hashes = 0;
3951 /* Flush and reset the mta with the new values */
3952 igb_set_multi(adapter->netdev);
3955 static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
3957 struct e1000_hw *hw = &adapter->hw;
3958 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
3960 u8 *addr = (u8 *)(&msgbuf[1]);
3962 /* process all the same items cleared in a function level reset */
3963 igb_vf_reset_event(adapter, vf);
3965 /* set vf mac address */
3966 igb_rar_set(hw, vf_mac, vf + 1);
3967 igb_set_rah_pool(hw, vf, vf + 1);
3969 /* enable transmit and receive for vf */
3970 reg = rd32(E1000_VFTE);
3971 wr32(E1000_VFTE, reg | (1 << vf));
3972 reg = rd32(E1000_VFRE);
3973 wr32(E1000_VFRE, reg | (1 << vf));
3975 /* enable mailbox functionality for vf */
3976 adapter->vf_data[vf].clear_to_send = true;
3978 /* reply to reset with ack and vf mac address */
3979 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
3980 memcpy(addr, vf_mac, 6);
3981 igb_write_mbx(hw, msgbuf, 3, vf);
3984 static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
3986 unsigned char *addr = (unsigned char *)&msg[1];
3989 if (is_valid_ether_addr(addr))
3990 err = igb_set_vf_mac(adapter, vf, addr);
3996 static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
3998 struct e1000_hw *hw = &adapter->hw;
3999 u32 msg = E1000_VT_MSGTYPE_NACK;
4001 /* if device isn't clear to send it shouldn't be reading either */
4002 if (!adapter->vf_data[vf].clear_to_send)
4003 igb_write_mbx(hw, &msg, 1, vf);
4007 static void igb_msg_task(struct igb_adapter *adapter)
4009 struct e1000_hw *hw = &adapter->hw;
4012 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4013 /* process any reset requests */
4014 if (!igb_check_for_rst(hw, vf)) {
4015 adapter->vf_data[vf].clear_to_send = false;
4016 igb_vf_reset_event(adapter, vf);
4019 /* process any messages pending */
4020 if (!igb_check_for_msg(hw, vf))
4021 igb_rcv_msg_from_vf(adapter, vf);
4023 /* process any acks */
4024 if (!igb_check_for_ack(hw, vf))
4025 igb_rcv_ack_from_vf(adapter, vf);
4030 static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4032 u32 mbx_size = E1000_VFMAILBOX_SIZE;
4033 u32 msgbuf[mbx_size];
4034 struct e1000_hw *hw = &adapter->hw;
4037 retval = igb_read_mbx(hw, msgbuf, mbx_size, vf);
4040 dev_err(&adapter->pdev->dev,
4041 "Error receiving message from VF\n");
4043 /* this is a message we already processed, do nothing */
4044 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
4048 * until the vf completes a reset it should not be
4049 * allowed to start any configuration.
4052 if (msgbuf[0] == E1000_VF_RESET) {
4053 igb_vf_reset_msg(adapter, vf);
4058 if (!adapter->vf_data[vf].clear_to_send) {
4059 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4060 igb_write_mbx(hw, msgbuf, 1, vf);
4064 switch (msgbuf[0] & 0xFFFF) {
4065 case E1000_VF_SET_MAC_ADDR:
4066 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4068 case E1000_VF_SET_MULTICAST:
4069 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4071 case E1000_VF_SET_LPE:
4072 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
4074 case E1000_VF_SET_VLAN:
4075 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4078 dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4083 /* notify the VF of the results of what it sent us */
4085 msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
4087 msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
4089 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4091 igb_write_mbx(hw, msgbuf, 1, vf);
4097 * igb_intr_msi - Interrupt Handler
4098 * @irq: interrupt number
4099 * @data: pointer to a network interface device structure
4101 static irqreturn_t igb_intr_msi(int irq, void *data)
4103 struct net_device *netdev = data;
4104 struct igb_adapter *adapter = netdev_priv(netdev);
4105 struct e1000_hw *hw = &adapter->hw;
4106 /* read ICR disables interrupts using IAM */
4107 u32 icr = rd32(E1000_ICR);
4109 igb_write_itr(adapter->rx_ring);
4111 if (icr & E1000_ICR_DOUTSYNC) {
4112 /* HW is reporting DMA is out of sync */
4113 adapter->stats.doosync++;
4116 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4117 hw->mac.get_link_status = 1;
4118 if (!test_bit(__IGB_DOWN, &adapter->state))
4119 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4122 napi_schedule(&adapter->rx_ring[0].napi);
4128 * igb_intr - Legacy Interrupt Handler
4129 * @irq: interrupt number
4130 * @data: pointer to a network interface device structure
4132 static irqreturn_t igb_intr(int irq, void *data)
4134 struct net_device *netdev = data;
4135 struct igb_adapter *adapter = netdev_priv(netdev);
4136 struct e1000_hw *hw = &adapter->hw;
4137 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4138 * need for the IMC write */
4139 u32 icr = rd32(E1000_ICR);
4141 return IRQ_NONE; /* Not our interrupt */
4143 igb_write_itr(adapter->rx_ring);
4145 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4146 * not set, then the adapter didn't send an interrupt */
4147 if (!(icr & E1000_ICR_INT_ASSERTED))
4150 if (icr & E1000_ICR_DOUTSYNC) {
4151 /* HW is reporting DMA is out of sync */
4152 adapter->stats.doosync++;
4155 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
4156 hw->mac.get_link_status = 1;
4157 /* guard against interrupt when we're going down */
4158 if (!test_bit(__IGB_DOWN, &adapter->state))
4159 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4162 napi_schedule(&adapter->rx_ring[0].napi);
4167 static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
4169 struct igb_adapter *adapter = rx_ring->adapter;
4170 struct e1000_hw *hw = &adapter->hw;
4172 if (adapter->itr_setting & 3) {
4173 if (adapter->num_rx_queues == 1)
4174 igb_set_itr(adapter);
4176 igb_update_ring_itr(rx_ring);
4179 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4180 if (adapter->msix_entries)
4181 wr32(E1000_EIMS, rx_ring->eims_value);
4183 igb_irq_enable(adapter);
4188 * igb_poll - NAPI Rx polling callback
4189 * @napi: napi polling structure
4190 * @budget: count of how many packets we should handle
4192 static int igb_poll(struct napi_struct *napi, int budget)
4194 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi);
4197 #ifdef CONFIG_IGB_DCA
4198 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
4199 igb_update_rx_dca(rx_ring);
4201 igb_clean_rx_irq_adv(rx_ring, &work_done, budget);
4203 if (rx_ring->buddy) {
4204 #ifdef CONFIG_IGB_DCA
4205 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
4206 igb_update_tx_dca(rx_ring->buddy);
4208 if (!igb_clean_tx_irq(rx_ring->buddy))
4212 /* If not enough Rx work done, exit the polling mode */
4213 if (work_done < budget) {
4214 napi_complete(napi);
4215 igb_rx_irq_enable(rx_ring);
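/* NAPI contract, as exercised above: igb_poll may consume at most
 * @budget Rx packets per call. Finishing below budget lets it call
 * napi_complete and re-arm interrupts via igb_rx_irq_enable; if the
 * whole budget was spent, interrupts stay masked and the core will
 * schedule this ring for polling again. */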
4222 * igb_tx_hwtstamp - utility function which checks for TX time stamp
4223 * @adapter: board private structure
4224 * @skb: packet that was just sent
4226 * If we were asked to do hardware stamping and such a time stamp is
4227 * available, then it must have been for this skb here because we
4228 * allow only one such packet into the queue.
4230 static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb)
4232 union skb_shared_tx *shtx = skb_tx(skb);
4233 struct e1000_hw *hw = &adapter->hw;
4235 if (unlikely(shtx->hardware)) {
4236 u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID;
4238 u64 regval = rd32(E1000_TXSTMPL);
4240 struct skb_shared_hwtstamps shhwtstamps;
4242 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
4243 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4244 ns = timecounter_cyc2time(&adapter->clock,
4246 timecompare_update(&adapter->compare, ns);
4247 shhwtstamps.hwtstamp = ns_to_ktime(ns);
4248 shhwtstamps.syststamp =
4249 timecompare_transform(&adapter->compare, ns);
4250 skb_tstamp_tx(skb, &shhwtstamps);
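/* The 64-bit raw stamp above is assembled from two 32-bit halves,
 * low word first:
 *
 *   u64 regval = rd32(E1000_TXSTMPL);
 *   regval |= (u64)rd32(E1000_TXSTMPH) << 32;
 *
 * Reading TXSTMPH releases the latch (TSYNCTXCTL_VALID clears), so the
 * hardware can capture a stamp for the next time-stamped packet. */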
4256 * igb_clean_tx_irq - Reclaim resources after transmit completes
4257 * @tx_ring: pointer to the Tx ring being cleaned
4258 * returns true if ring is completely cleaned
4260 static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4262 struct igb_adapter *adapter = tx_ring->adapter;
4263 struct net_device *netdev = adapter->netdev;
4264 struct e1000_hw *hw = &adapter->hw;
4265 struct igb_buffer *buffer_info;
4266 struct sk_buff *skb;
4267 union e1000_adv_tx_desc *tx_desc, *eop_desc;
4268 unsigned int total_bytes = 0, total_packets = 0;
4269 unsigned int i, eop, count = 0;
4270 bool cleaned = false;
4272 i = tx_ring->next_to_clean;
4273 eop = tx_ring->buffer_info[i].next_to_watch;
4274 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
4276 while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
4277 (count < tx_ring->count)) {
4278 for (cleaned = false; !cleaned; count++) {
4279 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
4280 buffer_info = &tx_ring->buffer_info[i];
4281 cleaned = (i == eop);
4282 skb = buffer_info->skb;
4285 unsigned int segs, bytecount;
4286 /* gso_segs is currently only valid for tcp */
4287 segs = skb_shinfo(skb)->gso_segs ?: 1;
4288 /* multiply data chunks by size of headers */
4289 bytecount = ((segs - 1) * skb_headlen(skb)) +
4291 total_packets += segs;
4292 total_bytes += bytecount;
4294 igb_tx_hwtstamp(adapter, skb);
4297 igb_unmap_and_free_tx_resource(adapter, buffer_info);
4298 tx_desc->wb.status = 0;
4301 if (i == tx_ring->count)
4304 eop = tx_ring->buffer_info[i].next_to_watch;
4305 eop_desc = E1000_TX_DESC_ADV(*tx_ring, eop);
4308 tx_ring->next_to_clean = i;
4310 if (unlikely(count &&
4311 netif_carrier_ok(netdev) &&
4312 IGB_DESC_UNUSED(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
4313 /* Make sure that anybody stopping the queue after this
4314 * sees the new next_to_clean.
4315 */
4316 smp_mb();
4317 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
4318 !(test_bit(__IGB_DOWN, &adapter->state))) {
4319 netif_wake_subqueue(netdev, tx_ring->queue_index);
4320 ++adapter->restart_queue;
4324 if (tx_ring->detect_tx_hung) {
4325 /* Detect a transmit hang in hardware; this serializes the
4326 * check with the clearing of time_stamp and movement of i */
4327 tx_ring->detect_tx_hung = false;
4328 if (tx_ring->buffer_info[i].time_stamp &&
4329 time_after(jiffies, tx_ring->buffer_info[i].time_stamp +
4330 (adapter->tx_timeout_factor * HZ))
4331 && !(rd32(E1000_STATUS) &
4332 E1000_STATUS_TXOFF)) {
4334 /* detected Tx unit hang */
4335 dev_err(&adapter->pdev->dev,
4336 "Detected Tx Unit Hang\n"
4340 " next_to_use <%x>\n"
4341 " next_to_clean <%x>\n"
4342 "buffer_info[next_to_clean]\n"
4343 " time_stamp <%lx>\n"
4344 " next_to_watch <%x>\n"
4346 " desc.status <%x>\n",
4347 tx_ring->queue_index,
4348 readl(adapter->hw.hw_addr + tx_ring->head),
4349 readl(adapter->hw.hw_addr + tx_ring->tail),
4350 tx_ring->next_to_use,
4351 tx_ring->next_to_clean,
4352 tx_ring->buffer_info[i].time_stamp,
4355 eop_desc->wb.status);
4356 netif_stop_subqueue(netdev, tx_ring->queue_index);
4359 tx_ring->total_bytes += total_bytes;
4360 tx_ring->total_packets += total_packets;
4361 tx_ring->tx_stats.bytes += total_bytes;
4362 tx_ring->tx_stats.packets += total_packets;
4363 adapter->net_stats.tx_bytes += total_bytes;
4364 adapter->net_stats.tx_packets += total_packets;
4365 return (count < tx_ring->count);
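/* The return value above encodes "ring completely cleaned": processing
 * fewer than tx_ring->count descriptors means the loop stopped at the
 * first descriptor hardware has not written back yet, so Tx cleaning
 * is caught up; otherwise igb_poll keeps the ring in polling mode. */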
4369 * igb_receive_skb - helper function to handle rx indications
4370 * @ring: pointer to receive ring receiving this packet
4371 * @status: descriptor status field as written by hardware
4372 * @rx_desc: receive descriptor containing vlan and type information.
4373 * @skb: pointer to sk_buff to be indicated to stack
4375 static void igb_receive_skb(struct igb_ring *ring, u8 status,
4376 union e1000_adv_rx_desc *rx_desc,
4377 struct sk_buff *skb)
4379 struct igb_adapter *adapter = ring->adapter;
4380 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP));
4382 skb_record_rx_queue(skb, ring->queue_index);
4383 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
4385 vlan_gro_receive(&ring->napi, adapter->vlgrp,
4386 le16_to_cpu(rx_desc->wb.upper.vlan),
4389 napi_gro_receive(&ring->napi, skb);
4392 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
4393 le16_to_cpu(rx_desc->wb.upper.vlan));
4395 netif_receive_skb(skb);
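/* Delivery decision above, summarized: checksum-verified skbs take the
 * GRO path (vlan_gro_receive/napi_gro_receive) so receive offload can
 * coalesce them; everything else uses the classic
 * vlan_hwaccel_receive_skb/netif_receive_skb pair. The VLAN variants
 * are chosen whenever a vlan group is registered and the descriptor's
 * VP status bit is set. */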
4399 static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
4400 u32 status_err, struct sk_buff *skb)
4402 skb->ip_summed = CHECKSUM_NONE;
4404 /* bail out if the Ignore Checksum bit is set or checksum is disabled through ethtool */
4405 if ((status_err & E1000_RXD_STAT_IXSM) || !adapter->rx_csum)
4407 /* TCP/UDP checksum error bit is set */
4409 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
4410 /* let the stack verify checksum errors */
4411 adapter->hw_csum_err++;
4414 /* It must be a TCP or UDP packet with a valid checksum */
4415 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
4416 skb->ip_summed = CHECKSUM_UNNECESSARY;
4418 adapter->hw_csum_good++;
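/* Descriptor status bits consumed above, for reference:
 *
 *   E1000_RXD_STAT_IXSM             ignore checksum indication
 *   E1000_RXDEXT_STATERR_TCPE/IPE   TCP/UDP or IP checksum error
 *   E1000_RXD_STAT_TCPCS/UDPCS      TCP/UDP checksum was validated
 *
 * Only the last case yields CHECKSUM_UNNECESSARY; on errors the skb is
 * left as CHECKSUM_NONE so the stack re-verifies in software. */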
4421 static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4422 int *work_done, int budget)
4424 struct igb_adapter *adapter = rx_ring->adapter;
4425 struct net_device *netdev = adapter->netdev;
4426 struct e1000_hw *hw = &adapter->hw;
4427 struct pci_dev *pdev = adapter->pdev;
4428 union e1000_adv_rx_desc *rx_desc, *next_rxd;
4429 struct igb_buffer *buffer_info, *next_buffer;
4430 struct sk_buff *skb;
4431 bool cleaned = false;
4432 int cleaned_count = 0;
4433 unsigned int total_bytes = 0, total_packets = 0;
4435 u32 length, hlen, staterr;
4437 i = rx_ring->next_to_clean;
4438 buffer_info = &rx_ring->buffer_info[i];
4439 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4440 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
4442 while (staterr & E1000_RXD_STAT_DD) {
4443 if (*work_done >= budget)
4447 skb = buffer_info->skb;
4448 prefetch(skb->data - NET_IP_ALIGN);
4449 buffer_info->skb = NULL;
4452 if (i == rx_ring->count)
4454 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
4456 next_buffer = &rx_ring->buffer_info[i];
4458 length = le16_to_cpu(rx_desc->wb.upper.length);
4462 if (!adapter->rx_ps_hdr_size) {
4463 pci_unmap_single(pdev, buffer_info->dma,
4464 adapter->rx_buffer_len +
4466 PCI_DMA_FROMDEVICE);
4467 skb_put(skb, length);
4471 /* HW will not DMA in data larger than the given buffer, even
4472 * if it parses the (NFS, of course) header to be larger. In
4473 * that case, it fills the header buffer and spills the rest
4476 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
4477 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
4478 if (hlen > adapter->rx_ps_hdr_size)
4479 hlen = adapter->rx_ps_hdr_size;
4481 if (!skb_shinfo(skb)->nr_frags) {
4482 pci_unmap_single(pdev, buffer_info->dma,
4483 adapter->rx_ps_hdr_size + NET_IP_ALIGN,
4484 PCI_DMA_FROMDEVICE);
4489 pci_unmap_page(pdev, buffer_info->page_dma,
4490 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
4491 buffer_info->page_dma = 0;
4493 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
4495 buffer_info->page_offset,
4498 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
4499 (page_count(buffer_info->page) != 1))
4500 buffer_info->page = NULL;
4502 get_page(buffer_info->page);
4505 skb->data_len += length;
4507 skb->truesize += length;
4510 if (!(staterr & E1000_RXD_STAT_EOP)) {
4511 buffer_info->skb = next_buffer->skb;
4512 buffer_info->dma = next_buffer->dma;
4513 next_buffer->skb = skb;
4514 next_buffer->dma = 0;
4519 * If this bit is set, then the RX registers contain
4520 * the time stamp. No other packet will be time
4521 * stamped until we read these registers, so read the
4522 * registers to make them available again. Because
4523 * only one packet can be time stamped at a time, we
4524 * know that the register values must belong to this
4525 * one here and therefore we don't need to compare
4526 * any of the additional attributes stored for it.
4528 * If nothing went wrong, then it should have a
4529 * skb_shared_tx that we can turn into a
4530 * skb_shared_hwtstamps.
4532 * TODO: can time stamping be triggered (thus locking
4533 * the registers) without the packet reaching this point?
4534 * In that case RX time stamping would get stuck.
4536 * TODO: in "time stamp all packets" mode this bit is
4537 * not set. Need a global flag for this mode and then
4538 * always read the registers. Cannot be done without
4541 if (unlikely(staterr & E1000_RXD_STAT_TS)) {
4544 struct skb_shared_hwtstamps *shhwtstamps =
4547 WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
4548 "igb: no RX time stamp available for time stamped packet");
4549 regval = rd32(E1000_RXSTMPL);
4550 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4551 ns = timecounter_cyc2time(&adapter->clock, regval);
4552 timecompare_update(&adapter->compare, ns);
4553 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
4554 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4555 shhwtstamps->syststamp =
4556 timecompare_transform(&adapter->compare, ns);
4559 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
4560 dev_kfree_skb_irq(skb);
4564 total_bytes += skb->len;
4567 igb_rx_checksum_adv(adapter, staterr, skb);
4569 skb->protocol = eth_type_trans(skb, netdev);
4571 igb_receive_skb(rx_ring, staterr, rx_desc, skb);
4574 rx_desc->wb.upper.status_error = 0;
4576 /* return some buffers to hardware, one at a time is too slow */
4577 if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
4578 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
4582 /* use prefetched values */
4584 buffer_info = next_buffer;
4585 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
4588 rx_ring->next_to_clean = i;
4589 cleaned_count = IGB_DESC_UNUSED(rx_ring);
4592 igb_alloc_rx_buffers_adv(rx_ring, cleaned_count);
4594 rx_ring->total_packets += total_packets;
4595 rx_ring->total_bytes += total_bytes;
4596 rx_ring->rx_stats.packets += total_packets;
4597 rx_ring->rx_stats.bytes += total_bytes;
4598 adapter->net_stats.rx_bytes += total_bytes;
4599 adapter->net_stats.rx_packets += total_packets;
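/* Replenishment strategy used above: cleaned descriptors are handed
 * back to hardware in batches of IGB_RX_BUFFER_WRITE inside the loop,
 * and any remainder is returned once the loop exits, so the ring tail
 * register is not written on every single packet. */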
4604 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
4605 * @rx_ring: pointer to the receive ring to refill
4607 static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4610 struct igb_adapter *adapter = rx_ring->adapter;
4611 struct net_device *netdev = adapter->netdev;
4612 struct pci_dev *pdev = adapter->pdev;
4613 union e1000_adv_rx_desc *rx_desc;
4614 struct igb_buffer *buffer_info;
4615 struct sk_buff *skb;
4619 i = rx_ring->next_to_use;
4620 buffer_info = &rx_ring->buffer_info[i];
4622 if (adapter->rx_ps_hdr_size)
4623 bufsz = adapter->rx_ps_hdr_size;
4625 bufsz = adapter->rx_buffer_len;
4626 bufsz += NET_IP_ALIGN;
4628 while (cleaned_count--) {
4629 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4631 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
4632 if (!buffer_info->page) {
4633 buffer_info->page = alloc_page(GFP_ATOMIC);
4634 if (!buffer_info->page) {
4635 adapter->alloc_rx_buff_failed++;
4638 buffer_info->page_offset = 0;
4640 buffer_info->page_offset ^= PAGE_SIZE / 2;
4642 buffer_info->page_dma =
4643 pci_map_page(pdev, buffer_info->page,
4644 buffer_info->page_offset,
4646 PCI_DMA_FROMDEVICE);
4649 if (!buffer_info->skb) {
4650 skb = netdev_alloc_skb(netdev, bufsz);
4652 adapter->alloc_rx_buff_failed++;
4656 /* Make buffer alignment 2 beyond a 16 byte boundary;
4657 * this will result in a 16 byte aligned IP header after
4658 * the 14 byte MAC header is removed
4660 skb_reserve(skb, NET_IP_ALIGN);
4662 buffer_info->skb = skb;
4663 buffer_info->dma = pci_map_single(pdev, skb->data,
4665 PCI_DMA_FROMDEVICE);
4667 /* Refresh the desc even if buffer_addrs didn't change because
4668 * each write-back erases this info. */
4669 if (adapter->rx_ps_hdr_size) {
4670 rx_desc->read.pkt_addr =
4671 cpu_to_le64(buffer_info->page_dma);
4672 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
4674 rx_desc->read.pkt_addr =
4675 cpu_to_le64(buffer_info->dma);
4676 rx_desc->read.hdr_addr = 0;
4680 if (i == rx_ring->count)
4682 buffer_info = &rx_ring->buffer_info[i];
4686 if (rx_ring->next_to_use != i) {
4687 rx_ring->next_to_use = i;
4689 i = (rx_ring->count - 1);
4693 /* Force memory writes to complete before letting h/w
4694 * know there are new descriptors to fetch. (Only
4695 * applicable for weak-ordered memory model archs,
4696 * such as IA-64). */
4697 wmb();
4698 writel(i, adapter->hw.hw_addr + rx_ring->tail);
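/* Tail semantics: hardware may fill descriptors up to, but not
 * including, the tail pointer, and i was stepped back by one entry
 * before this write, so the ring never looks completely full to the
 * device. The single writel publishes the whole batch of newly armed
 * descriptors at once. */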
4708 static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4710 struct igb_adapter *adapter = netdev_priv(netdev);
4711 struct mii_ioctl_data *data = if_mii(ifr);
4713 if (adapter->hw.phy.media_type != e1000_media_type_copper)
4718 data->phy_id = adapter->hw.phy.addr;
4721 if (!capable(CAP_NET_ADMIN))
4723 if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
4735 * igb_hwtstamp_ioctl - control hardware time stamping
4740 * Outgoing time stamping can be enabled and disabled. Play nice and
4741 * disable it when requested, although it shouldn't cause any overhead
4742 * when no packet needs it. At most one packet in the queue may be
4743 * marked for time stamping, otherwise it would be impossible to tell
4744 * for sure to which packet the hardware time stamp belongs.
4746 * Incoming time stamping has to be configured via the hardware
4747 * filters. Not all combinations are supported, in particular event
4748 * type has to be specified. Matching the kind of event packet is
4749 * not supported, with the exception of "all V2 events regardless of
4753 static int igb_hwtstamp_ioctl(struct net_device *netdev,
4754 struct ifreq *ifr, int cmd)
4756 struct igb_adapter *adapter = netdev_priv(netdev);
4757 struct e1000_hw *hw = &adapter->hw;
4758 struct hwtstamp_config config;
4759 u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
4760 u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED;
4761 u32 tsync_rx_ctl_type = 0;
4762 u32 tsync_rx_cfg = 0;
4765 short port = 319; /* PTP */
4768 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
4771 /* reserved for future extensions */
4775 switch (config.tx_type) {
4776 case HWTSTAMP_TX_OFF:
4777 tsync_tx_ctl_bit = 0;
4779 case HWTSTAMP_TX_ON:
4780 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
4786 switch (config.rx_filter) {
4787 case HWTSTAMP_FILTER_NONE:
4788 tsync_rx_ctl_bit = 0;
4790 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
4791 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
4792 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
4793 case HWTSTAMP_FILTER_ALL:
4795 * register TSYNCRXCFG must be set, therefore it is not
4796 * possible to time stamp both Sync and Delay_Req messages
4797 * => fall back to time stamping all packets
4799 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL;
4800 config.rx_filter = HWTSTAMP_FILTER_ALL;
4802 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
4803 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
4804 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
4807 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
4808 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1;
4809 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
4812 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
4813 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
4814 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
4815 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
4818 config.rx_filter = HWTSTAMP_FILTER_SOME;
4820 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
4821 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
4822 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
4823 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
4826 config.rx_filter = HWTSTAMP_FILTER_SOME;
4828 case HWTSTAMP_FILTER_PTP_V2_EVENT:
4829 case HWTSTAMP_FILTER_PTP_V2_SYNC:
4830 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
4831 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2;
4832 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
4839 /* enable/disable TX */
4840 regval = rd32(E1000_TSYNCTXCTL);
4841 regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit;
4842 wr32(E1000_TSYNCTXCTL, regval);
4844 /* enable/disable RX, define which PTP packets are time stamped */
4845 regval = rd32(E1000_TSYNCRXCTL);
4846 regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit;
4847 regval = (regval & ~0xE) | tsync_rx_ctl_type;
4848 wr32(E1000_TSYNCRXCTL, regval);
4849 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
4852 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7
4853 * (Ethertype to filter on)
4854 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
4855 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
4857 wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
4859 /* L4 Queue Filter[0]: only filter by source and destination port */
4860 wr32(E1000_SPQF0, htons(port));
4861 wr32(E1000_IMIREXT(0), is_l4 ?
4862 ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
4863 wr32(E1000_IMIR(0), is_l4 ?
4865 | (0<<16) /* immediate interrupt disabled */
4866 | 0 /* (1<<17) bit cleared: do not bypass
4867 destination port check */)
4869 wr32(E1000_FTQF0, is_l4 ?
4871 | (1<<15) /* VF not compared */
4872 | (1<<27) /* Enable Timestamping */
4873 | (7<<28) /* only source port filter enabled,
4874 source/target address and protocol
4876 : ((1<<15) | (15<<28) /* all mask bits set = filter not
4881 adapter->hwtstamp_config = config;
4883 /* clear TX/RX time stamp registers, just to be sure */
4884 regval = rd32(E1000_TXSTMPH);
4885 regval = rd32(E1000_RXSTMPH);
4887 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
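/* Userspace usage sketch for the ioctl above (illustrative only, not
 * part of this driver; "eth0" and the helper name are assumptions):
 *
 *   #include <string.h>
 *   #include <sys/ioctl.h>
 *   #include <net/if.h>
 *   #include <linux/sockios.h>
 *   #include <linux/net_tstamp.h>
 *
 *   int enable_hwtstamp(int sock)
 *   {
 *           struct hwtstamp_config cfg;
 *           struct ifreq ifr;
 *
 *           memset(&cfg, 0, sizeof(cfg));
 *           cfg.tx_type = HWTSTAMP_TX_ON;
 *           cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
 *
 *           memset(&ifr, 0, sizeof(ifr));
 *           strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *           ifr.ifr_data = (void *)&cfg;
 *
 *           return ioctl(sock, SIOCSHWTSTAMP, &ifr);
 *   }
 *
 * On return the driver may have adjusted cfg.rx_filter (for example to
 * HWTSTAMP_FILTER_ALL, as in the fall-back case above). */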
4897 static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4903 return igb_mii_ioctl(netdev, ifr, cmd);
4905 return igb_hwtstamp_ioctl(netdev, ifr, cmd);
4911 static void igb_vlan_rx_register(struct net_device *netdev,
4912 struct vlan_group *grp)
4914 struct igb_adapter *adapter = netdev_priv(netdev);
4915 struct e1000_hw *hw = &adapter->hw;
4918 igb_irq_disable(adapter);
4919 adapter->vlgrp = grp;
4922 /* enable VLAN tag insert/strip */
4923 ctrl = rd32(E1000_CTRL);
4924 ctrl |= E1000_CTRL_VME;
4925 wr32(E1000_CTRL, ctrl);
4927 /* enable VLAN receive filtering */
4928 rctl = rd32(E1000_RCTL);
4929 rctl &= ~E1000_RCTL_CFIEN;
4930 wr32(E1000_RCTL, rctl);
4931 igb_update_mng_vlan(adapter);
4933 /* disable VLAN tag insert/strip */
4934 ctrl = rd32(E1000_CTRL);
4935 ctrl &= ~E1000_CTRL_VME;
4936 wr32(E1000_CTRL, ctrl);
4938 if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
4939 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4940 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
4944 igb_rlpml_set(adapter);
4946 if (!test_bit(__IGB_DOWN, &adapter->state))
4947 igb_irq_enable(adapter);
4950 static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4952 struct igb_adapter *adapter = netdev_priv(netdev);
4953 struct e1000_hw *hw = &adapter->hw;
4954 int pf_id = adapter->vfs_allocated_count;
4956 if ((hw->mng_cookie.status &
4957 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
4958 (vid == adapter->mng_vlan_id))
4961 /* add vid to vlvf if sr-iov is enabled,
4962 * if that fails add directly to filter table */
4963 if (igb_vlvf_set(adapter, vid, true, pf_id))
4964 igb_vfta_set(hw, vid, true);
4968 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4970 struct igb_adapter *adapter = netdev_priv(netdev);
4971 struct e1000_hw *hw = &adapter->hw;
4972 int pf_id = adapter->vfs_allocated_count;
4974 igb_irq_disable(adapter);
4975 vlan_group_set_device(adapter->vlgrp, vid, NULL);
4977 if (!test_bit(__IGB_DOWN, &adapter->state))
4978 igb_irq_enable(adapter);
4980 if ((adapter->hw.mng_cookie.status &
4981 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
4982 (vid == adapter->mng_vlan_id)) {
4983 /* release control to f/w */
4984 igb_release_hw_control(adapter);
4988 /* remove vid from vlvf if sr-iov is enabled,
4989 * if not in vlvf remove from vfta */
4990 if (igb_vlvf_set(adapter, vid, false, pf_id))
4991 igb_vfta_set(hw, vid, false);
4994 static void igb_restore_vlan(struct igb_adapter *adapter)
4996 igb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
4998 if (adapter->vlgrp) {
5000 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
5001 if (!vlan_group_get_device(adapter->vlgrp, vid))
5003 igb_vlan_rx_add_vid(adapter->netdev, vid);
5008 int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5010 struct e1000_mac_info *mac = &adapter->hw.mac;
5014 /* Fiber NICs only allow 1 Gbps full duplex */
5015 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
5016 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
5017 dev_err(&adapter->pdev->dev,
5018 "Unsupported Speed/Duplex configuration\n");
5023 case SPEED_10 + DUPLEX_HALF:
5024 mac->forced_speed_duplex = ADVERTISE_10_HALF;
5026 case SPEED_10 + DUPLEX_FULL:
5027 mac->forced_speed_duplex = ADVERTISE_10_FULL;
5029 case SPEED_100 + DUPLEX_HALF:
5030 mac->forced_speed_duplex = ADVERTISE_100_HALF;
5032 case SPEED_100 + DUPLEX_FULL:
5033 mac->forced_speed_duplex = ADVERTISE_100_FULL;
5035 case SPEED_1000 + DUPLEX_FULL:
5037 adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
5039 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5041 dev_err(&adapter->pdev->dev,
5042 "Unsupported Speed/Duplex configuration\n");
5048 static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
5050 struct net_device *netdev = pci_get_drvdata(pdev);
5051 struct igb_adapter *adapter = netdev_priv(netdev);
5052 struct e1000_hw *hw = &adapter->hw;
5053 u32 ctrl, rctl, status;
5054 u32 wufc = adapter->wol;
5059 netif_device_detach(netdev);
5061 if (netif_running(netdev))
5064 igb_reset_interrupt_capability(adapter);
5066 igb_free_queues(adapter);
5069 retval = pci_save_state(pdev);
5074 status = rd32(E1000_STATUS);
5075 if (status & E1000_STATUS_LU)
5076 wufc &= ~E1000_WUFC_LNKC;
5079 igb_setup_rctl(adapter);
5080 igb_set_multi(netdev);
5082 /* turn on all-multi mode if wake on multicast is enabled */
5083 if (wufc & E1000_WUFC_MC) {
5084 rctl = rd32(E1000_RCTL);
5085 rctl |= E1000_RCTL_MPE;
5086 wr32(E1000_RCTL, rctl);
5089 ctrl = rd32(E1000_CTRL);
5090 /* advertise wake from D3Cold */
5091 #define E1000_CTRL_ADVD3WUC 0x00100000
5092 /* phy power management enable */
5093 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5094 ctrl |= E1000_CTRL_ADVD3WUC;
5095 wr32(E1000_CTRL, ctrl);
5097 /* Allow time for pending master requests to run */
5098 igb_disable_pcie_master(&adapter->hw);
5100 wr32(E1000_WUC, E1000_WUC_PME_EN);
5101 wr32(E1000_WUFC, wufc);
5104 wr32(E1000_WUFC, 0);
5107 /* make sure adapter isn't asleep if manageability/wol is enabled */
5108 if (wufc || adapter->en_mng_pt) {
5109 pci_enable_wake(pdev, PCI_D3hot, 1);
5110 pci_enable_wake(pdev, PCI_D3cold, 1);
5112 igb_shutdown_fiber_serdes_link_82575(hw);
5113 pci_enable_wake(pdev, PCI_D3hot, 0);
5114 pci_enable_wake(pdev, PCI_D3cold, 0);
5117 /* Release control of h/w to f/w. If f/w is AMT enabled, this
5118 * would have already happened in close and is redundant. */
5119 igb_release_hw_control(adapter);
5121 pci_disable_device(pdev);
5123 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5129 static int igb_resume(struct pci_dev *pdev)
5131 struct net_device *netdev = pci_get_drvdata(pdev);
5132 struct igb_adapter *adapter = netdev_priv(netdev);
5133 struct e1000_hw *hw = &adapter->hw;
5136 pci_set_power_state(pdev, PCI_D0);
5137 pci_restore_state(pdev);
5139 err = pci_enable_device_mem(pdev);
5142 "igb: Cannot enable PCI device from suspend\n");
5145 pci_set_master(pdev);
5147 pci_enable_wake(pdev, PCI_D3hot, 0);
5148 pci_enable_wake(pdev, PCI_D3cold, 0);
5150 igb_set_interrupt_capability(adapter);
5152 if (igb_alloc_queues(adapter)) {
5153 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
5157 /* e1000_power_up_phy(adapter); */
5161 /* let the f/w know that the h/w is now under the control of the
5163 igb_get_hw_control(adapter);
5165 wr32(E1000_WUS, ~0);
5167 if (netif_running(netdev)) {
5168 err = igb_open(netdev);
5173 netif_device_attach(netdev);
5179 static void igb_shutdown(struct pci_dev *pdev)
5181 igb_suspend(pdev, PMSG_SUSPEND);
5184 #ifdef CONFIG_NET_POLL_CONTROLLER
5186 * Polling 'interrupt' - used by things like netconsole to send skbs
5187 * without having to re-enable interrupts. It's not called while
5188 * the interrupt routine is executing.
5190 static void igb_netpoll(struct net_device *netdev)
5192 struct igb_adapter *adapter = netdev_priv(netdev);
5193 struct e1000_hw *hw = &adapter->hw;
5196 if (!adapter->msix_entries) {
5197 igb_irq_disable(adapter);
5198 napi_schedule(&adapter->rx_ring[0].napi);
5202 for (i = 0; i < adapter->num_tx_queues; i++) {
5203 struct igb_ring *tx_ring = &adapter->tx_ring[i];
5204 wr32(E1000_EIMC, tx_ring->eims_value);
5205 igb_clean_tx_irq(tx_ring);
5206 wr32(E1000_EIMS, tx_ring->eims_value);
5209 for (i = 0; i < adapter->num_rx_queues; i++) {
5210 struct igb_ring *rx_ring = &adapter->rx_ring[i];
5211 wr32(E1000_EIMC, rx_ring->eims_value);
5212 napi_schedule(&rx_ring->napi);
5215 #endif /* CONFIG_NET_POLL_CONTROLLER */
5218 * igb_io_error_detected - called when PCI error is detected
5219 * @pdev: Pointer to PCI device
5220 * @state: The current pci connection state
5222 * This function is called after a PCI bus error affecting
5223 * this device has been detected.
5225 static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
5226 pci_channel_state_t state)
5228 struct net_device *netdev = pci_get_drvdata(pdev);
5229 struct igb_adapter *adapter = netdev_priv(netdev);
5231 netif_device_detach(netdev);
5233 if (netif_running(netdev))
5235 pci_disable_device(pdev);
5237 /* Request a slot reset. */
5238 return PCI_ERS_RESULT_NEED_RESET;
5242 * igb_io_slot_reset - called after the pci bus has been reset.
5243 * @pdev: Pointer to PCI device
5245 * Restart the card from scratch, as if from a cold-boot. Implementation
5246 * resembles the first half of the igb_resume routine.
5248 static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
5250 struct net_device *netdev = pci_get_drvdata(pdev);
5251 struct igb_adapter *adapter = netdev_priv(netdev);
5252 struct e1000_hw *hw = &adapter->hw;
5253 pci_ers_result_t result;
5256 if (pci_enable_device_mem(pdev)) {
5258 "Cannot re-enable PCI device after reset.\n");
5259 result = PCI_ERS_RESULT_DISCONNECT;
5261 pci_set_master(pdev);
5262 pci_restore_state(pdev);
5264 pci_enable_wake(pdev, PCI_D3hot, 0);
5265 pci_enable_wake(pdev, PCI_D3cold, 0);
5268 wr32(E1000_WUS, ~0);
5269 result = PCI_ERS_RESULT_RECOVERED;
5272 err = pci_cleanup_aer_uncorrect_error_status(pdev);
5274 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status "
5275 "failed 0x%0x\n", err);
5276 /* non-fatal, continue */
5283 * igb_io_resume - called when traffic can start flowing again.
5284 * @pdev: Pointer to PCI device
5286 * This callback is called when the error recovery driver tells us that
5287 * it's OK to resume normal operation. Implementation resembles the
5288 * second half of the igb_resume routine.
5290 static void igb_io_resume(struct pci_dev *pdev)
5292 struct net_device *netdev = pci_get_drvdata(pdev);
5293 struct igb_adapter *adapter = netdev_priv(netdev);
5295 if (netif_running(netdev)) {
5296 if (igb_up(adapter)) {
5297 dev_err(&pdev->dev, "igb_up failed after reset\n");
5302 netif_device_attach(netdev);
5304 /* let the f/w know that the h/w is now under the control of the
5306 igb_get_hw_control(adapter);
5309 static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
5313 reg_data = rd32(E1000_VMOLR(vfn));
5314 reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */
5315 E1000_VMOLR_ROPE | /* Accept packets matched in UTA */
5316 E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */
5317 E1000_VMOLR_AUPE | /* Accept untagged packets */
5318 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
5319 wr32(E1000_VMOLR(vfn), reg_data);
5322 static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
5325 struct e1000_hw *hw = &adapter->hw;
5328 vmolr = rd32(E1000_VMOLR(vfn));
5329 vmolr &= ~E1000_VMOLR_RLPML_MASK;
5330 vmolr |= size | E1000_VMOLR_LPE;
5331 wr32(E1000_VMOLR(vfn), vmolr);
5336 static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
5340 reg_data = rd32(E1000_RAH(entry));
5341 reg_data &= ~E1000_RAH_POOL_MASK;
5342 reg_data |= E1000_RAH_POOL_1 << pool;
5343 wr32(E1000_RAH(entry), reg_data);
5346 static void igb_set_mc_list_pools(struct igb_adapter *adapter,
5347 int entry_count, u16 total_rar_filters)
5349 struct e1000_hw *hw = &adapter->hw;
5350 int i = adapter->vfs_allocated_count + 1;
5352 if ((i + entry_count) < total_rar_filters)
5353 total_rar_filters = i + entry_count;
5355 for (; i < total_rar_filters; i++)
5356 igb_set_rah_pool(hw, adapter->vfs_allocated_count, i);
5359 static int igb_set_vf_mac(struct igb_adapter *adapter,
5360 int vf, unsigned char *mac_addr)
5362 struct e1000_hw *hw = &adapter->hw;
5363 int rar_entry = vf + 1; /* VF MAC addresses start at entry 1 */
5365 igb_rar_set(hw, mac_addr, rar_entry);
5367 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
5369 igb_set_rah_pool(hw, vf, rar_entry);
5374 static void igb_vmm_control(struct igb_adapter *adapter)
5376 struct e1000_hw *hw = &adapter->hw;
5379 if (!adapter->vfs_allocated_count)
5382 /* VFs need a PF reset indication before they
5383 * can send/receive mail */
5384 reg_data = rd32(E1000_CTRL_EXT);
5385 reg_data |= E1000_CTRL_EXT_PFRSTD;
5386 wr32(E1000_CTRL_EXT, reg_data);
5388 igb_vmdq_set_loopback_pf(hw, true);
5389 igb_vmdq_set_replication_pf(hw, true);
5392 #ifdef CONFIG_PCI_IOV
5393 static ssize_t igb_show_num_vfs(struct device *dev,
5394 struct device_attribute *attr, char *buf)
5396 struct igb_adapter *adapter = netdev_priv(to_net_dev(dev));
5398 return sprintf(buf, "%d\n", adapter->vfs_allocated_count);
5401 static ssize_t igb_set_num_vfs(struct device *dev,
5402 struct device_attribute *attr,
5403 const char *buf, size_t count)
5405 struct net_device *netdev = to_net_dev(dev);
5406 struct igb_adapter *adapter = netdev_priv(netdev);
5407 struct e1000_hw *hw = &adapter->hw;
5408 struct pci_dev *pdev = adapter->pdev;
5409 unsigned int num_vfs, i;
5410 unsigned char mac_addr[ETH_ALEN];
5413 sscanf(buf, "%u", &num_vfs);
5418 /* value unchanged do nothing */
5419 if (num_vfs == adapter->vfs_allocated_count)
5422 if (netdev->flags & IFF_UP)
5425 igb_reset_interrupt_capability(adapter);
5426 igb_free_queues(adapter);
5427 adapter->tx_ring = NULL;
5428 adapter->rx_ring = NULL;
5429 adapter->vfs_allocated_count = 0;
5431 /* reclaim resources allocated to VFs since we are changing count */
5432 if (adapter->vf_data) {
5433 /* disable iov and allow time for transactions to clear */
5434 pci_disable_sriov(pdev);
5437 kfree(adapter->vf_data);
5438 adapter->vf_data = NULL;
5439 wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
5441 dev_info(&pdev->dev, "IOV Disabled\n");
5445 adapter->vf_data = kcalloc(num_vfs,
5446 sizeof(struct vf_data_storage),
5448 if (!adapter->vf_data) {
5449 dev_err(&pdev->dev, "Could not allocate VF private "
5450 "data - IOV enable failed\n");
5452 err = pci_enable_sriov(pdev, num_vfs);
5454 adapter->vfs_allocated_count = num_vfs;
5455 dev_info(&pdev->dev, "%d vfs allocated\n", num_vfs);
5456 for (i = 0; i < adapter->vfs_allocated_count; i++) {
5457 random_ether_addr(mac_addr);
5458 igb_set_vf_mac(adapter, i, mac_addr);
5461 kfree(adapter->vf_data);
5462 adapter->vf_data = NULL;
5467 igb_set_interrupt_capability(adapter);
5468 igb_alloc_queues(adapter);
5471 if (netdev->flags & IFF_UP)
5476 #endif /* CONFIG_PCI_IOV */