1 /*******************************************************************************
3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2006 Intel Corporation.
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
23 Linux NICS <linux.nics@intel.com>
24 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *******************************************************************************/
30 #include <net/ip6_checksum.h>
32 char e1000_driver_name[] = "e1000";
33 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
34 #ifndef CONFIG_E1000_NAPI
35 #define DRIVERNAPI
36 #else
37 #define DRIVERNAPI "-NAPI"
38 #endif
39 #define DRV_VERSION "7.3.15-k2"DRIVERNAPI
40 char e1000_driver_version[] = DRV_VERSION;
41 static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
43 /* e1000_pci_tbl - PCI Device ID Table
45 * Last entry must be all 0s
48 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
49 */
50 static struct pci_device_id e1000_pci_tbl[] = {
51 INTEL_E1000_ETHERNET_DEVICE(0x1000),
52 INTEL_E1000_ETHERNET_DEVICE(0x1001),
53 INTEL_E1000_ETHERNET_DEVICE(0x1004),
54 INTEL_E1000_ETHERNET_DEVICE(0x1008),
55 INTEL_E1000_ETHERNET_DEVICE(0x1009),
56 INTEL_E1000_ETHERNET_DEVICE(0x100C),
57 INTEL_E1000_ETHERNET_DEVICE(0x100D),
58 INTEL_E1000_ETHERNET_DEVICE(0x100E),
59 INTEL_E1000_ETHERNET_DEVICE(0x100F),
60 INTEL_E1000_ETHERNET_DEVICE(0x1010),
61 INTEL_E1000_ETHERNET_DEVICE(0x1011),
62 INTEL_E1000_ETHERNET_DEVICE(0x1012),
63 INTEL_E1000_ETHERNET_DEVICE(0x1013),
64 INTEL_E1000_ETHERNET_DEVICE(0x1014),
65 INTEL_E1000_ETHERNET_DEVICE(0x1015),
66 INTEL_E1000_ETHERNET_DEVICE(0x1016),
67 INTEL_E1000_ETHERNET_DEVICE(0x1017),
68 INTEL_E1000_ETHERNET_DEVICE(0x1018),
69 INTEL_E1000_ETHERNET_DEVICE(0x1019),
70 INTEL_E1000_ETHERNET_DEVICE(0x101A),
71 INTEL_E1000_ETHERNET_DEVICE(0x101D),
72 INTEL_E1000_ETHERNET_DEVICE(0x101E),
73 INTEL_E1000_ETHERNET_DEVICE(0x1026),
74 INTEL_E1000_ETHERNET_DEVICE(0x1027),
75 INTEL_E1000_ETHERNET_DEVICE(0x1028),
76 INTEL_E1000_ETHERNET_DEVICE(0x1049),
77 INTEL_E1000_ETHERNET_DEVICE(0x104A),
78 INTEL_E1000_ETHERNET_DEVICE(0x104B),
79 INTEL_E1000_ETHERNET_DEVICE(0x104C),
80 INTEL_E1000_ETHERNET_DEVICE(0x104D),
81 INTEL_E1000_ETHERNET_DEVICE(0x105E),
82 INTEL_E1000_ETHERNET_DEVICE(0x105F),
83 INTEL_E1000_ETHERNET_DEVICE(0x1060),
84 INTEL_E1000_ETHERNET_DEVICE(0x1075),
85 INTEL_E1000_ETHERNET_DEVICE(0x1076),
86 INTEL_E1000_ETHERNET_DEVICE(0x1077),
87 INTEL_E1000_ETHERNET_DEVICE(0x1078),
88 INTEL_E1000_ETHERNET_DEVICE(0x1079),
89 INTEL_E1000_ETHERNET_DEVICE(0x107A),
90 INTEL_E1000_ETHERNET_DEVICE(0x107B),
91 INTEL_E1000_ETHERNET_DEVICE(0x107C),
92 INTEL_E1000_ETHERNET_DEVICE(0x107D),
93 INTEL_E1000_ETHERNET_DEVICE(0x107E),
94 INTEL_E1000_ETHERNET_DEVICE(0x107F),
95 INTEL_E1000_ETHERNET_DEVICE(0x108A),
96 INTEL_E1000_ETHERNET_DEVICE(0x108B),
97 INTEL_E1000_ETHERNET_DEVICE(0x108C),
98 INTEL_E1000_ETHERNET_DEVICE(0x1096),
99 INTEL_E1000_ETHERNET_DEVICE(0x1098),
100 INTEL_E1000_ETHERNET_DEVICE(0x1099),
101 INTEL_E1000_ETHERNET_DEVICE(0x109A),
102 INTEL_E1000_ETHERNET_DEVICE(0x10A4),
103 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
104 INTEL_E1000_ETHERNET_DEVICE(0x10B9),
105 INTEL_E1000_ETHERNET_DEVICE(0x10BA),
106 INTEL_E1000_ETHERNET_DEVICE(0x10BB),
107 INTEL_E1000_ETHERNET_DEVICE(0x10BC),
108 INTEL_E1000_ETHERNET_DEVICE(0x10C4),
109 INTEL_E1000_ETHERNET_DEVICE(0x10C5),
110 /* required last entry */
111 {0,}
112 };
114 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
116 int e1000_up(struct e1000_adapter *adapter);
117 void e1000_down(struct e1000_adapter *adapter);
118 void e1000_reinit_locked(struct e1000_adapter *adapter);
119 void e1000_reset(struct e1000_adapter *adapter);
120 int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
121 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
122 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
123 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
124 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
125 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
126 struct e1000_tx_ring *txdr);
127 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
128 struct e1000_rx_ring *rxdr);
129 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
130 struct e1000_tx_ring *tx_ring);
131 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
132 struct e1000_rx_ring *rx_ring);
133 void e1000_update_stats(struct e1000_adapter *adapter);
135 static int e1000_init_module(void);
136 static void e1000_exit_module(void);
137 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
138 static void __devexit e1000_remove(struct pci_dev *pdev);
139 static int e1000_alloc_queues(struct e1000_adapter *adapter);
140 static int e1000_sw_init(struct e1000_adapter *adapter);
141 static int e1000_open(struct net_device *netdev);
142 static int e1000_close(struct net_device *netdev);
143 static void e1000_configure_tx(struct e1000_adapter *adapter);
144 static void e1000_configure_rx(struct e1000_adapter *adapter);
145 static void e1000_setup_rctl(struct e1000_adapter *adapter);
146 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
147 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
148 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
149 struct e1000_tx_ring *tx_ring);
150 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
151 struct e1000_rx_ring *rx_ring);
152 static void e1000_set_multi(struct net_device *netdev);
153 static void e1000_update_phy_info(unsigned long data);
154 static void e1000_watchdog(unsigned long data);
155 static void e1000_82547_tx_fifo_stall(unsigned long data);
156 static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
157 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
158 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
159 static int e1000_set_mac(struct net_device *netdev, void *p);
160 static irqreturn_t e1000_intr(int irq, void *data);
161 #ifdef CONFIG_PCI_MSI
162 static irqreturn_t e1000_intr_msi(int irq, void *data);
164 static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
165 struct e1000_tx_ring *tx_ring);
166 #ifdef CONFIG_E1000_NAPI
167 static int e1000_clean(struct net_device *poll_dev, int *budget);
168 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
169 struct e1000_rx_ring *rx_ring,
170 int *work_done, int work_to_do);
171 static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
172 struct e1000_rx_ring *rx_ring,
173 int *work_done, int work_to_do);
175 static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
176 struct e1000_rx_ring *rx_ring);
177 static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
178 struct e1000_rx_ring *rx_ring);
180 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
181 struct e1000_rx_ring *rx_ring,
182 int cleaned_count);
183 static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
184 struct e1000_rx_ring *rx_ring,
185 int cleaned_count);
186 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
187 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
188 int cmd);
189 void e1000_set_ethtool_ops(struct net_device *netdev);
190 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
191 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
192 static void e1000_tx_timeout(struct net_device *dev);
193 static void e1000_reset_task(struct work_struct *work);
194 static void e1000_smartspeed(struct e1000_adapter *adapter);
195 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
196 struct sk_buff *skb);
198 static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
199 static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
200 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
201 static void e1000_restore_vlan(struct e1000_adapter *adapter);
203 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
205 static int e1000_resume(struct pci_dev *pdev);
207 static void e1000_shutdown(struct pci_dev *pdev);
209 #ifdef CONFIG_NET_POLL_CONTROLLER
210 /* for netdump / net console */
211 static void e1000_netpoll (struct net_device *netdev);
214 extern void e1000_check_options(struct e1000_adapter *adapter);
216 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
217 pci_channel_state_t state);
218 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
219 static void e1000_io_resume(struct pci_dev *pdev);
221 static struct pci_error_handlers e1000_err_handler = {
222 .error_detected = e1000_io_error_detected,
223 .slot_reset = e1000_io_slot_reset,
224 .resume = e1000_io_resume,
227 static struct pci_driver e1000_driver = {
228 .name = e1000_driver_name,
229 .id_table = e1000_pci_tbl,
230 .probe = e1000_probe,
231 .remove = __devexit_p(e1000_remove),
233 /* Power Management Hooks */
234 .suspend = e1000_suspend,
235 .resume = e1000_resume,
237 .shutdown = e1000_shutdown,
238 .err_handler = &e1000_err_handler
241 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
242 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
243 MODULE_LICENSE("GPL");
244 MODULE_VERSION(DRV_VERSION);
246 static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
247 module_param(debug, int, 0);
248 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
251 * e1000_init_module - Driver Registration Routine
253 * e1000_init_module is the first routine called when the driver is
254 * loaded. All it does is register with the PCI subsystem.
258 e1000_init_module(void)
261 printk(KERN_INFO "%s - version %s\n",
262 e1000_driver_string, e1000_driver_version);
264 printk(KERN_INFO "%s\n", e1000_copyright);
266 ret = pci_register_driver(&e1000_driver);
271 module_init(e1000_init_module);
274 * e1000_exit_module - Driver Exit Cleanup Routine
276 * e1000_exit_module is called just before the driver is removed
281 e1000_exit_module(void)
283 pci_unregister_driver(&e1000_driver);
286 module_exit(e1000_exit_module);
288 static int e1000_request_irq(struct e1000_adapter *adapter)
290 struct net_device *netdev = adapter->netdev;
294 #ifdef CONFIG_PCI_MSI
295 if (adapter->hw.mac_type >= e1000_82571) {
296 adapter->have_msi = TRUE;
297 if ((err = pci_enable_msi(adapter->pdev))) {
299 "Unable to allocate MSI interrupt Error: %d\n", err);
300 adapter->have_msi = FALSE;
303 if (adapter->have_msi) {
304 flags &= ~IRQF_SHARED;
305 err = request_irq(adapter->pdev->irq, &e1000_intr_msi, flags,
306 netdev->name, netdev);
309 "Unable to allocate interrupt Error: %d\n", err);
312 if ((err = request_irq(adapter->pdev->irq, &e1000_intr, flags,
313 netdev->name, netdev)))
315 "Unable to allocate interrupt Error: %d\n", err);
320 static void e1000_free_irq(struct e1000_adapter *adapter)
322 struct net_device *netdev = adapter->netdev;
324 free_irq(adapter->pdev->irq, netdev);
326 #ifdef CONFIG_PCI_MSI
327 if (adapter->have_msi)
328 pci_disable_msi(adapter->pdev);
333 * e1000_irq_disable - Mask off interrupt generation on the NIC
334 * @adapter: board private structure
338 e1000_irq_disable(struct e1000_adapter *adapter)
340 atomic_inc(&adapter->irq_sem);
341 E1000_WRITE_REG(&adapter->hw, IMC, ~0);
342 E1000_WRITE_FLUSH(&adapter->hw);
343 synchronize_irq(adapter->pdev->irq);
347 * e1000_irq_enable - Enable default interrupt generation settings
348 * @adapter: board private structure
352 e1000_irq_enable(struct e1000_adapter *adapter)
354 if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
355 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
356 E1000_WRITE_FLUSH(&adapter->hw);
361 e1000_update_mng_vlan(struct e1000_adapter *adapter)
363 struct net_device *netdev = adapter->netdev;
364 uint16_t vid = adapter->hw.mng_cookie.vlan_id;
365 uint16_t old_vid = adapter->mng_vlan_id;
366 if (adapter->vlgrp) {
367 if (!adapter->vlgrp->vlan_devices[vid]) {
368 if (adapter->hw.mng_cookie.status &
369 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
370 e1000_vlan_rx_add_vid(netdev, vid);
371 adapter->mng_vlan_id = vid;
373 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
375 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
377 !adapter->vlgrp->vlan_devices[old_vid])
378 e1000_vlan_rx_kill_vid(netdev, old_vid);
380 adapter->mng_vlan_id = vid;
385 * e1000_release_hw_control - release control of the h/w to f/w
386 * @adapter: address of board private structure
388 * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
389 * For ASF and Pass Through versions of f/w this means that the
390 * driver is no longer loaded. For AMT version (only with 82573)
391 * of the f/w this means that the network i/f is closed.
396 e1000_release_hw_control(struct e1000_adapter *adapter)
402 /* Let firmware take over control of h/w */
403 switch (adapter->hw.mac_type) {
406 case e1000_80003es2lan:
407 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
408 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
409 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
412 swsm = E1000_READ_REG(&adapter->hw, SWSM);
413 E1000_WRITE_REG(&adapter->hw, SWSM,
414 swsm & ~E1000_SWSM_DRV_LOAD);
416 extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT);
417 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
418 extcnf & ~E1000_CTRL_EXT_DRV_LOAD);
426 * e1000_get_hw_control - get control of the h/w from f/w
427 * @adapter: address of board private structure
429 * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
430 * For ASF and Pass Through versions of f/w this means that
431 * the driver is loaded. For AMT version (only with 82573)
432 * of the f/w this means that the network i/f is open.
437 e1000_get_hw_control(struct e1000_adapter *adapter)
443 /* Let firmware know the driver has taken over */
444 switch (adapter->hw.mac_type) {
447 case e1000_80003es2lan:
448 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
449 E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
450 ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
453 swsm = E1000_READ_REG(&adapter->hw, SWSM);
454 E1000_WRITE_REG(&adapter->hw, SWSM,
455 swsm | E1000_SWSM_DRV_LOAD);
458 extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL);
459 E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL,
460 extcnf | E1000_EXTCNF_CTRL_SWFLAG);
468 e1000_init_manageability(struct e1000_adapter *adapter)
470 if (adapter->en_mng_pt) {
471 uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);
473 /* disable hardware interception of ARP */
474 manc &= ~(E1000_MANC_ARP_EN);
476 /* enable receiving management packets to the host */
477 /* this will probably generate destination unreachable messages
478 * from the host OS, but the packets will be handled on SMBUS */
479 if (adapter->hw.has_manc2h) {
480 uint32_t manc2h = E1000_READ_REG(&adapter->hw, MANC2H);
482 manc |= E1000_MANC_EN_MNG2HOST;
483 #define E1000_MNG2HOST_PORT_623 (1 << 5)
484 #define E1000_MNG2HOST_PORT_664 (1 << 6)
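/* Assumption by the editor (not verified against the datasheet): these two
 * MANC2H bits correspond to UDP ports 623 and 664, the standard RMCP/ASF
 * remote-management ports, so traffic on those ports is routed to the host. */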
485 manc2h |= E1000_MNG2HOST_PORT_623;
486 manc2h |= E1000_MNG2HOST_PORT_664;
487 E1000_WRITE_REG(&adapter->hw, MANC2H, manc2h);
490 E1000_WRITE_REG(&adapter->hw, MANC, manc);
495 e1000_release_manageability(struct e1000_adapter *adapter)
497 if (adapter->en_mng_pt) {
498 uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);
500 /* re-enable hardware interception of ARP */
501 manc |= E1000_MANC_ARP_EN;
503 if (adapter->hw.has_manc2h)
504 manc &= ~E1000_MANC_EN_MNG2HOST;
506 /* don't explicitly have to mess with MANC2H since
507 * MANC has an enable/disable bit that gates MANC2H */
509 E1000_WRITE_REG(&adapter->hw, MANC, manc);
514 e1000_up(struct e1000_adapter *adapter)
516 struct net_device *netdev = adapter->netdev;
519 /* hardware has been reset, we need to reload some things */
521 e1000_set_multi(netdev);
523 e1000_restore_vlan(adapter);
524 e1000_init_manageability(adapter);
526 e1000_configure_tx(adapter);
527 e1000_setup_rctl(adapter);
528 e1000_configure_rx(adapter);
529 /* call E1000_DESC_UNUSED which always leaves
530 * at least 1 descriptor unused to make sure
531 * next_to_use != next_to_clean */
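/* Illustrative example, assuming the usual E1000_DESC_UNUSED definition in
 * e1000.h: with count = 256, next_to_use = 10 and next_to_clean = 5, the
 * macro yields 256 + 5 - 10 - 1 = 250 descriptors available for refill. */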
532 for (i = 0; i < adapter->num_rx_queues; i++) {
533 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
534 adapter->alloc_rx_buf(adapter, ring,
535 E1000_DESC_UNUSED(ring));
538 adapter->tx_queue_len = netdev->tx_queue_len;
540 #ifdef CONFIG_E1000_NAPI
541 netif_poll_enable(netdev);
543 e1000_irq_enable(adapter);
545 clear_bit(__E1000_DOWN, &adapter->flags);
547 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
552 * e1000_power_up_phy - restore link in case the phy was powered down
553 * @adapter: address of board private structure
555 * The phy may be powered down to save power and turn off link when the
556 * driver is unloaded and wake on lan is not enabled (among others)
557 * *** this routine MUST be followed by a call to e1000_reset ***
561 void e1000_power_up_phy(struct e1000_adapter *adapter)
563 uint16_t mii_reg = 0;
565 /* Just clear the power down bit to wake the phy back up */
566 if (adapter->hw.media_type == e1000_media_type_copper) {
567 /* according to the manual, the phy will retain its
568 * settings across a power-down/up cycle */
569 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
570 mii_reg &= ~MII_CR_POWER_DOWN;
571 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
575 static void e1000_power_down_phy(struct e1000_adapter *adapter)
577 /* Power down the PHY so no link is implied when interface is down *
578 * The PHY cannot be powered down if any of the following is TRUE *
579 * (a) WoL is enabled
580 * (b) AMT is active
581 * (c) SoL/IDER session is active */
582 if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
583 adapter->hw.media_type == e1000_media_type_copper) {
584 uint16_t mii_reg = 0;
586 switch (adapter->hw.mac_type) {
589 case e1000_82545_rev_3:
591 case e1000_82546_rev_3:
593 case e1000_82541_rev_2:
595 case e1000_82547_rev_2:
596 if (E1000_READ_REG(&adapter->hw, MANC) &
603 case e1000_80003es2lan:
605 if (e1000_check_mng_mode(&adapter->hw) ||
606 e1000_check_phy_reset_block(&adapter->hw))
612 e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
613 mii_reg |= MII_CR_POWER_DOWN;
614 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
622 e1000_down(struct e1000_adapter *adapter)
624 struct net_device *netdev = adapter->netdev;
626 /* signal that we're down so the interrupt handler does not
627 * reschedule our watchdog timer */
628 set_bit(__E1000_DOWN, &adapter->flags);
630 e1000_irq_disable(adapter);
632 del_timer_sync(&adapter->tx_fifo_stall_timer);
633 del_timer_sync(&adapter->watchdog_timer);
634 del_timer_sync(&adapter->phy_info_timer);
636 #ifdef CONFIG_E1000_NAPI
637 netif_poll_disable(netdev);
639 netdev->tx_queue_len = adapter->tx_queue_len;
640 adapter->link_speed = 0;
641 adapter->link_duplex = 0;
642 netif_carrier_off(netdev);
643 netif_stop_queue(netdev);
645 e1000_reset(adapter);
646 e1000_clean_all_tx_rings(adapter);
647 e1000_clean_all_rx_rings(adapter);
651 e1000_reinit_locked(struct e1000_adapter *adapter)
653 WARN_ON(in_interrupt());
654 while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
658 clear_bit(__E1000_RESETTING, &adapter->flags);
662 e1000_reset(struct e1000_adapter *adapter)
665 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
667 /* Repartition Pba for greater than 9k mtu
668 * To take effect CTRL.RST is required.
671 switch (adapter->hw.mac_type) {
673 case e1000_82547_rev_2:
678 case e1000_80003es2lan:
692 if ((adapter->hw.mac_type != e1000_82573) &&
693 (adapter->netdev->mtu > E1000_RXBUFFER_8192))
694 pba -= 8; /* allocate more FIFO for Tx */
697 if (adapter->hw.mac_type == e1000_82547) {
698 adapter->tx_fifo_head = 0;
699 adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
700 adapter->tx_fifo_size =
701 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
702 atomic_set(&adapter->tx_fifo_stall, 0);
705 E1000_WRITE_REG(&adapter->hw, PBA, pba);
707 /* flow control settings */
708 /* Set the FC high water mark to 90% of the FIFO size.
709 * Required to clear last 3 LSB */
710 fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
711 /* We can't use 90% on small FIFOs because the remainder
712 * would be less than 1 full frame. In this case, we size
713 * it to allow at least a full frame above the high water
714 * mark. */
715 if (pba < E1000_PBA_16K)
716 fc_high_water_mark = (pba * 1024) - 1600;
718 adapter->hw.fc_high_water = fc_high_water_mark;
719 adapter->hw.fc_low_water = fc_high_water_mark - 8;
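/* Worked example with illustrative numbers (not taken from the code): for
 * pba = 30, i.e. a 30 KB packet buffer, (30 * 9216) / 10 = 27648, which is
 * already a multiple of 8, so fc_high_water = 27648 and fc_low_water = 27640. */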
720 if (adapter->hw.mac_type == e1000_80003es2lan)
721 adapter->hw.fc_pause_time = 0xFFFF;
723 adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
724 adapter->hw.fc_send_xon = 1;
725 adapter->hw.fc = adapter->hw.original_fc;
727 /* Allow time for pending master requests to run */
728 e1000_reset_hw(&adapter->hw);
729 if (adapter->hw.mac_type >= e1000_82544)
730 E1000_WRITE_REG(&adapter->hw, WUC, 0);
732 if (e1000_init_hw(&adapter->hw))
733 DPRINTK(PROBE, ERR, "Hardware Error\n");
734 e1000_update_mng_vlan(adapter);
735 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
736 E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
738 e1000_reset_adaptive(&adapter->hw);
739 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
741 if (!adapter->smart_power_down &&
742 (adapter->hw.mac_type == e1000_82571 ||
743 adapter->hw.mac_type == e1000_82572)) {
744 uint16_t phy_data = 0;
745 /* speed up time to link by disabling smart power down, ignore
746 * the return value of this function because there is nothing
747 * different we would do if it failed */
748 e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
750 phy_data &= ~IGP02E1000_PM_SPD;
751 e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
755 e1000_release_manageability(adapter);
759 * e1000_probe - Device Initialization Routine
760 * @pdev: PCI device information struct
761 * @ent: entry in e1000_pci_tbl
763 * Returns 0 on success, negative on failure
765 * e1000_probe initializes an adapter identified by a pci_dev structure.
766 * The OS initialization, configuring of the adapter private structure,
767 * and a hardware reset occur.
771 e1000_probe(struct pci_dev *pdev,
772 const struct pci_device_id *ent)
774 struct net_device *netdev;
775 struct e1000_adapter *adapter;
776 unsigned long mmio_start, mmio_len;
777 unsigned long flash_start, flash_len;
779 static int cards_found = 0;
780 static int global_quad_port_a = 0; /* global ksp3 port a indication */
781 int i, err, pci_using_dac;
782 uint16_t eeprom_data = 0;
783 uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
784 if ((err = pci_enable_device(pdev)))
787 if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
788 !(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
791 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) &&
792 (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
793 E1000_ERR("No usable DMA configuration, aborting\n");
799 if ((err = pci_request_regions(pdev, e1000_driver_name)))
802 pci_set_master(pdev);
805 netdev = alloc_etherdev(sizeof(struct e1000_adapter));
807 goto err_alloc_etherdev;
809 SET_MODULE_OWNER(netdev);
810 SET_NETDEV_DEV(netdev, &pdev->dev);
812 pci_set_drvdata(pdev, netdev);
813 adapter = netdev_priv(netdev);
814 adapter->netdev = netdev;
815 adapter->pdev = pdev;
816 adapter->hw.back = adapter;
817 adapter->msg_enable = (1 << debug) - 1;
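/* debug is treated as a message level rather than a bitmask here: the default
 * of NETIF_MSG_DRV | NETIF_MSG_PROBE equals 3, so (1 << 3) - 1 = 0x7 enables
 * the DRV, PROBE and LINK message classes. */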
819 mmio_start = pci_resource_start(pdev, BAR_0);
820 mmio_len = pci_resource_len(pdev, BAR_0);
823 adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
824 if (!adapter->hw.hw_addr)
827 for (i = BAR_1; i <= BAR_5; i++) {
828 if (pci_resource_len(pdev, i) == 0)
830 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
831 adapter->hw.io_base = pci_resource_start(pdev, i);
836 netdev->open = &e1000_open;
837 netdev->stop = &e1000_close;
838 netdev->hard_start_xmit = &e1000_xmit_frame;
839 netdev->get_stats = &e1000_get_stats;
840 netdev->set_multicast_list = &e1000_set_multi;
841 netdev->set_mac_address = &e1000_set_mac;
842 netdev->change_mtu = &e1000_change_mtu;
843 netdev->do_ioctl = &e1000_ioctl;
844 e1000_set_ethtool_ops(netdev);
845 netdev->tx_timeout = &e1000_tx_timeout;
846 netdev->watchdog_timeo = 5 * HZ;
847 #ifdef CONFIG_E1000_NAPI
848 netdev->poll = &e1000_clean;
851 netdev->vlan_rx_register = e1000_vlan_rx_register;
852 netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
853 netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
854 #ifdef CONFIG_NET_POLL_CONTROLLER
855 netdev->poll_controller = e1000_netpoll;
857 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
859 netdev->mem_start = mmio_start;
860 netdev->mem_end = mmio_start + mmio_len;
861 netdev->base_addr = adapter->hw.io_base;
863 adapter->bd_number = cards_found;
865 /* setup the private structure */
867 if ((err = e1000_sw_init(adapter)))
871 /* Flash BAR mapping must happen after e1000_sw_init
872 * because it depends on mac_type */
873 if ((adapter->hw.mac_type == e1000_ich8lan) &&
874 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
875 flash_start = pci_resource_start(pdev, 1);
876 flash_len = pci_resource_len(pdev, 1);
877 adapter->hw.flash_address = ioremap(flash_start, flash_len);
878 if (!adapter->hw.flash_address)
882 if (e1000_check_phy_reset_block(&adapter->hw))
883 DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
885 if (adapter->hw.mac_type >= e1000_82543) {
886 netdev->features = NETIF_F_SG |
890 NETIF_F_HW_VLAN_FILTER;
891 if (adapter->hw.mac_type == e1000_ich8lan)
892 netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
896 if ((adapter->hw.mac_type >= e1000_82544) &&
897 (adapter->hw.mac_type != e1000_82547))
898 netdev->features |= NETIF_F_TSO;
901 if (adapter->hw.mac_type > e1000_82547_rev_2)
902 netdev->features |= NETIF_F_TSO6;
906 netdev->features |= NETIF_F_HIGHDMA;
908 netdev->features |= NETIF_F_LLTX;
910 adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
912 /* initialize eeprom parameters */
914 if (e1000_init_eeprom_params(&adapter->hw)) {
915 E1000_ERR("EEPROM initialization failed\n");
919 /* before reading the EEPROM, reset the controller to
920 * put the device in a known good starting state */
922 e1000_reset_hw(&adapter->hw);
924 /* make sure the EEPROM is good */
926 if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
927 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
931 /* copy the MAC address out of the EEPROM */
933 if (e1000_read_mac_addr(&adapter->hw))
934 DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
935 memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
936 memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
938 if (!is_valid_ether_addr(netdev->perm_addr)) {
939 DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
943 e1000_get_bus_info(&adapter->hw);
945 init_timer(&adapter->tx_fifo_stall_timer);
946 adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
947 adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
949 init_timer(&adapter->watchdog_timer);
950 adapter->watchdog_timer.function = &e1000_watchdog;
951 adapter->watchdog_timer.data = (unsigned long) adapter;
953 init_timer(&adapter->phy_info_timer);
954 adapter->phy_info_timer.function = &e1000_update_phy_info;
955 adapter->phy_info_timer.data = (unsigned long) adapter;
957 INIT_WORK(&adapter->reset_task, e1000_reset_task);
959 e1000_check_options(adapter);
961 /* Initial Wake on LAN setting
962 * If APM wake is enabled in the EEPROM,
963 * enable the ACPI Magic Packet filter
966 switch (adapter->hw.mac_type) {
967 case e1000_82542_rev2_0:
968 case e1000_82542_rev2_1:
972 e1000_read_eeprom(&adapter->hw,
973 EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
974 eeprom_apme_mask = E1000_EEPROM_82544_APM;
977 e1000_read_eeprom(&adapter->hw,
978 EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
979 eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
982 case e1000_82546_rev_3:
984 case e1000_80003es2lan:
985 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
986 e1000_read_eeprom(&adapter->hw,
987 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
992 e1000_read_eeprom(&adapter->hw,
993 EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
996 if (eeprom_data & eeprom_apme_mask)
997 adapter->eeprom_wol |= E1000_WUFC_MAG;
999 /* now that we have the eeprom settings, apply the special cases
1000 * where the eeprom may be wrong or the board simply won't support
1001 * wake on lan on a particular port */
1002 switch (pdev->device) {
1003 case E1000_DEV_ID_82546GB_PCIE:
1004 adapter->eeprom_wol = 0;
1006 case E1000_DEV_ID_82546EB_FIBER:
1007 case E1000_DEV_ID_82546GB_FIBER:
1008 case E1000_DEV_ID_82571EB_FIBER:
1009 /* Wake events only supported on port A for dual fiber
1010 * regardless of eeprom setting */
1011 if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
1012 adapter->eeprom_wol = 0;
1014 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1015 case E1000_DEV_ID_82571EB_QUAD_COPPER:
1016 case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
1017 /* if quad port adapter, disable WoL on all but port A */
1018 if (global_quad_port_a != 0)
1019 adapter->eeprom_wol = 0;
1021 adapter->quad_port_a = 1;
1022 /* Reset for multiple quad port adapters */
1023 if (++global_quad_port_a == 4)
1024 global_quad_port_a = 0;
1028 /* initialize the wol settings based on the eeprom settings */
1029 adapter->wol = adapter->eeprom_wol;
1031 /* print bus type/speed/width info */
1033 struct e1000_hw *hw = &adapter->hw;
1034 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
1035 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
1036 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
1037 ((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1038 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
1039 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
1040 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
1041 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
1042 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
1043 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
1044 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
1048 for (i = 0; i < 6; i++)
1049 printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
1051 /* reset the hardware with the new settings */
1052 e1000_reset(adapter);
1054 /* If the controller is 82573 and f/w is AMT, do not set
1055 * DRV_LOAD until the interface is up. For all other cases,
1056 * let the f/w know that the h/w is now under the control
1057 * of the driver. */
1058 if (adapter->hw.mac_type != e1000_82573 ||
1059 !e1000_check_mng_mode(&adapter->hw))
1060 e1000_get_hw_control(adapter);
1062 strcpy(netdev->name, "eth%d");
1063 if ((err = register_netdev(netdev)))
1066 /* tell the stack to leave us alone until e1000_open() is called */
1067 netif_carrier_off(netdev);
1068 netif_stop_queue(netdev);
1070 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
1076 e1000_release_hw_control(adapter);
1078 if (!e1000_check_phy_reset_block(&adapter->hw))
1079 e1000_phy_hw_reset(&adapter->hw);
1081 if (adapter->hw.flash_address)
1082 iounmap(adapter->hw.flash_address);
1084 #ifdef CONFIG_E1000_NAPI
1085 for (i = 0; i < adapter->num_rx_queues; i++)
1086 dev_put(&adapter->polling_netdev[i]);
1089 kfree(adapter->tx_ring);
1090 kfree(adapter->rx_ring);
1091 #ifdef CONFIG_E1000_NAPI
1092 kfree(adapter->polling_netdev);
1095 iounmap(adapter->hw.hw_addr);
1097 free_netdev(netdev);
1099 pci_release_regions(pdev);
1102 pci_disable_device(pdev);
1107 * e1000_remove - Device Removal Routine
1108 * @pdev: PCI device information struct
1110 * e1000_remove is called by the PCI subsystem to alert the driver
1111 * that it should release a PCI device. This could be caused by a
1112 * Hot-Plug event, or because the driver is going to be removed from
1113 * memory.
1116 static void __devexit
1117 e1000_remove(struct pci_dev *pdev)
1119 struct net_device *netdev = pci_get_drvdata(pdev);
1120 struct e1000_adapter *adapter = netdev_priv(netdev);
1121 #ifdef CONFIG_E1000_NAPI
1125 flush_scheduled_work();
1127 e1000_release_manageability(adapter);
1129 /* Release control of h/w to f/w. If f/w is AMT enabled, this
1130 * would have already happened in close and is redundant. */
1131 e1000_release_hw_control(adapter);
1133 unregister_netdev(netdev);
1134 #ifdef CONFIG_E1000_NAPI
1135 for (i = 0; i < adapter->num_rx_queues; i++)
1136 dev_put(&adapter->polling_netdev[i]);
1139 if (!e1000_check_phy_reset_block(&adapter->hw))
1140 e1000_phy_hw_reset(&adapter->hw);
1142 kfree(adapter->tx_ring);
1143 kfree(adapter->rx_ring);
1144 #ifdef CONFIG_E1000_NAPI
1145 kfree(adapter->polling_netdev);
1148 iounmap(adapter->hw.hw_addr);
1149 if (adapter->hw.flash_address)
1150 iounmap(adapter->hw.flash_address);
1151 pci_release_regions(pdev);
1153 free_netdev(netdev);
1155 pci_disable_device(pdev);
1159 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1160 * @adapter: board private structure to initialize
1162 * e1000_sw_init initializes the Adapter private data structure.
1163 * Fields are initialized based on PCI device information and
1164 * OS network device settings (MTU size).
1167 static int __devinit
1168 e1000_sw_init(struct e1000_adapter *adapter)
1170 struct e1000_hw *hw = &adapter->hw;
1171 struct net_device *netdev = adapter->netdev;
1172 struct pci_dev *pdev = adapter->pdev;
1173 #ifdef CONFIG_E1000_NAPI
1177 /* PCI config space info */
1179 hw->vendor_id = pdev->vendor;
1180 hw->device_id = pdev->device;
1181 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1182 hw->subsystem_id = pdev->subsystem_device;
1184 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
1186 pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
1188 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1189 adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
1190 hw->max_frame_size = netdev->mtu +
1191 ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
1192 hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
1194 /* identify the MAC */
1196 if (e1000_set_mac_type(hw)) {
1197 DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
1201 switch (hw->mac_type) {
1206 case e1000_82541_rev_2:
1207 case e1000_82547_rev_2:
1208 hw->phy_init_script = 1;
1212 e1000_set_media_type(hw);
1214 hw->wait_autoneg_complete = FALSE;
1215 hw->tbi_compatibility_en = TRUE;
1216 hw->adaptive_ifs = TRUE;
1218 /* Copper options */
1220 if (hw->media_type == e1000_media_type_copper) {
1221 hw->mdix = AUTO_ALL_MODES;
1222 hw->disable_polarity_correction = FALSE;
1223 hw->master_slave = E1000_MASTER_SLAVE;
1226 adapter->num_tx_queues = 1;
1227 adapter->num_rx_queues = 1;
1229 if (e1000_alloc_queues(adapter)) {
1230 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
1234 #ifdef CONFIG_E1000_NAPI
1235 for (i = 0; i < adapter->num_rx_queues; i++) {
1236 adapter->polling_netdev[i].priv = adapter;
1237 adapter->polling_netdev[i].poll = &e1000_clean;
1238 adapter->polling_netdev[i].weight = 64;
1239 dev_hold(&adapter->polling_netdev[i]);
1240 set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
1242 spin_lock_init(&adapter->tx_queue_lock);
1245 atomic_set(&adapter->irq_sem, 1);
1246 spin_lock_init(&adapter->stats_lock);
1248 set_bit(__E1000_DOWN, &adapter->flags);
1254 * e1000_alloc_queues - Allocate memory for all rings
1255 * @adapter: board private structure to initialize
1257 * We allocate one ring per queue at run-time since we don't know the
1258 * number of queues at compile-time. The polling_netdev array is
1259 * intended for Multiqueue, but should work fine with a single queue.
1262 static int __devinit
1263 e1000_alloc_queues(struct e1000_adapter *adapter)
1267 size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
1268 adapter->tx_ring = kmalloc(size, GFP_KERNEL);
1269 if (!adapter->tx_ring)
1271 memset(adapter->tx_ring, 0, size);
1273 size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
1274 adapter->rx_ring = kmalloc(size, GFP_KERNEL);
1275 if (!adapter->rx_ring) {
1276 kfree(adapter->tx_ring);
1279 memset(adapter->rx_ring, 0, size);
1281 #ifdef CONFIG_E1000_NAPI
1282 size = sizeof(struct net_device) * adapter->num_rx_queues;
1283 adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
1284 if (!adapter->polling_netdev) {
1285 kfree(adapter->tx_ring);
1286 kfree(adapter->rx_ring);
1289 memset(adapter->polling_netdev, 0, size);
1292 return E1000_SUCCESS;
1296 * e1000_open - Called when a network interface is made active
1297 * @netdev: network interface device structure
1299 * Returns 0 on success, negative value on failure
1301 * The open entry point is called when a network interface is made
1302 * active by the system (IFF_UP). At this point all resources needed
1303 * for transmit and receive operations are allocated, the interrupt
1304 * handler is registered with the OS, the watchdog timer is started,
1305 * and the stack is notified that the interface is ready.
1309 e1000_open(struct net_device *netdev)
1311 struct e1000_adapter *adapter = netdev_priv(netdev);
1314 /* disallow open during test */
1315 if (test_bit(__E1000_TESTING, &adapter->flags))
1318 /* allocate transmit descriptors */
1319 if ((err = e1000_setup_all_tx_resources(adapter)))
1322 /* allocate receive descriptors */
1323 if ((err = e1000_setup_all_rx_resources(adapter)))
1326 err = e1000_request_irq(adapter);
1330 e1000_power_up_phy(adapter);
1332 if ((err = e1000_up(adapter)))
1334 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1335 if ((adapter->hw.mng_cookie.status &
1336 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1337 e1000_update_mng_vlan(adapter);
1340 /* If AMT is enabled, let the firmware know that the network
1341 * interface is now open */
1342 if (adapter->hw.mac_type == e1000_82573 &&
1343 e1000_check_mng_mode(&adapter->hw))
1344 e1000_get_hw_control(adapter);
1346 return E1000_SUCCESS;
1349 e1000_power_down_phy(adapter);
1350 e1000_free_irq(adapter);
1352 e1000_free_all_rx_resources(adapter);
1354 e1000_free_all_tx_resources(adapter);
1356 e1000_reset(adapter);
1362 * e1000_close - Disables a network interface
1363 * @netdev: network interface device structure
1365 * Returns 0, this is not allowed to fail
1367 * The close entry point is called when an interface is de-activated
1368 * by the OS. The hardware is still under the drivers control, but
1369 * needs to be disabled. A global MAC reset is issued to stop the
1370 * hardware, and all transmit and receive resources are freed.
1374 e1000_close(struct net_device *netdev)
1376 struct e1000_adapter *adapter = netdev_priv(netdev);
1378 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1379 e1000_down(adapter);
1380 e1000_power_down_phy(adapter);
1381 e1000_free_irq(adapter);
1383 e1000_free_all_tx_resources(adapter);
1384 e1000_free_all_rx_resources(adapter);
1386 /* kill manageability vlan ID if supported, but not if a vlan with
1387 * the same ID is registered on the host OS (let 8021q kill it) */
1388 if ((adapter->hw.mng_cookie.status &
1389 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1391 adapter->vlgrp->vlan_devices[adapter->mng_vlan_id])) {
1392 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1395 /* If AMT is enabled, let the firmware know that the network
1396 * interface is now closed */
1397 if (adapter->hw.mac_type == e1000_82573 &&
1398 e1000_check_mng_mode(&adapter->hw))
1399 e1000_release_hw_control(adapter);
1405 * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1406 * @adapter: address of board private structure
1407 * @start: address of beginning of memory
1408 * @len: length of memory
1411 e1000_check_64k_bound(struct e1000_adapter *adapter,
1412 void *start, unsigned long len)
1414 unsigned long begin = (unsigned long) start;
1415 unsigned long end = begin + len;
1417 /* First rev 82545 and 82546 must not allow any memory
1418 * write location to cross a 64k boundary due to errata 23 */
1419 if (adapter->hw.mac_type == e1000_82545 ||
1420 adapter->hw.mac_type == e1000_82546) {
1421 return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
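/* Example (hypothetical addresses): begin = 0x1F000 with len = 0x2000 gives
 * end - 1 = 0x20FFF; (0x1F000 ^ 0x20FFF) >> 16 is non-zero, so the buffer
 * straddles a 64 KB boundary and FALSE is returned. */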
1428 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1429 * @adapter: board private structure
1430 * @txdr: tx descriptor ring (for a specific queue) to setup
1432 * Return 0 on success, negative on failure
1436 e1000_setup_tx_resources(struct e1000_adapter *adapter,
1437 struct e1000_tx_ring *txdr)
1439 struct pci_dev *pdev = adapter->pdev;
1442 size = sizeof(struct e1000_buffer) * txdr->count;
1443 txdr->buffer_info = vmalloc(size);
1444 if (!txdr->buffer_info) {
1446 "Unable to allocate memory for the transmit descriptor ring\n");
1449 memset(txdr->buffer_info, 0, size);
1451 /* round up to nearest 4K */
1453 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1454 E1000_ROUNDUP(txdr->size, 4096);
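/* E.g. assuming the default of 256 descriptors of 16 bytes each (the size of
 * struct e1000_tx_desc), txdr->size is exactly 4096 and the round-up is a
 * no-op; 80 descriptors (1280 bytes) would be rounded up to 4096. */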
1456 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1459 vfree(txdr->buffer_info);
1461 "Unable to allocate memory for the transmit descriptor ring\n");
1465 /* Fix for errata 23, can't cross 64kB boundary */
1466 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1467 void *olddesc = txdr->desc;
1468 dma_addr_t olddma = txdr->dma;
1469 DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
1470 "at %p\n", txdr->size, txdr->desc);
1471 /* Try again, without freeing the previous */
1472 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
1473 /* Failed allocation, critical failure */
1475 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1476 goto setup_tx_desc_die;
1479 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1481 pci_free_consistent(pdev, txdr->size, txdr->desc,
1483 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1485 "Unable to allocate aligned memory "
1486 "for the transmit descriptor ring\n");
1487 vfree(txdr->buffer_info);
1490 /* Free old allocation, new allocation was successful */
1491 pci_free_consistent(pdev, txdr->size, olddesc, olddma);
1494 memset(txdr->desc, 0, txdr->size);
1496 txdr->next_to_use = 0;
1497 txdr->next_to_clean = 0;
1498 spin_lock_init(&txdr->tx_lock);
1504 * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1505 * (Descriptors) for all queues
1506 * @adapter: board private structure
1508 * Return 0 on success, negative on failure
1512 e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1516 for (i = 0; i < adapter->num_tx_queues; i++) {
1517 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1520 "Allocation for Tx Queue %u failed\n", i);
1521 for (i-- ; i >= 0; i--)
1522 e1000_free_tx_resources(adapter,
1523 &adapter->tx_ring[i]);
1532 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1533 * @adapter: board private structure
1535 * Configure the Tx unit of the MAC after a reset.
1539 e1000_configure_tx(struct e1000_adapter *adapter)
1542 struct e1000_hw *hw = &adapter->hw;
1543 uint32_t tdlen, tctl, tipg, tarc;
1544 uint32_t ipgr1, ipgr2;
1546 /* Setup the HW Tx Head and Tail descriptor pointers */
1548 switch (adapter->num_tx_queues) {
1551 tdba = adapter->tx_ring[0].dma;
1552 tdlen = adapter->tx_ring[0].count *
1553 sizeof(struct e1000_tx_desc);
1554 E1000_WRITE_REG(hw, TDLEN, tdlen);
1555 E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
1556 E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
1557 E1000_WRITE_REG(hw, TDT, 0);
1558 E1000_WRITE_REG(hw, TDH, 0);
1559 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? E1000_TDH : E1000_82542_TDH);
1560 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? E1000_TDT : E1000_82542_TDT);
1564 /* Set the default values for the Tx Inter Packet Gap timer */
1566 if (hw->media_type == e1000_media_type_fiber ||
1567 hw->media_type == e1000_media_type_internal_serdes)
1568 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1570 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1572 switch (hw->mac_type) {
1573 case e1000_82542_rev2_0:
1574 case e1000_82542_rev2_1:
1575 tipg = DEFAULT_82542_TIPG_IPGT;
1576 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1577 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1579 case e1000_80003es2lan:
1580 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1581 ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
1584 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1585 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1588 tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1589 tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1590 E1000_WRITE_REG(hw, TIPG, tipg);
1592 /* Set the Tx Interrupt Delay register */
1594 E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
1595 if (hw->mac_type >= e1000_82540)
1596 E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
1598 /* Program the Transmit Control Register */
1600 tctl = E1000_READ_REG(hw, TCTL);
1601 tctl &= ~E1000_TCTL_CT;
1602 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1603 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1605 if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
1606 tarc = E1000_READ_REG(hw, TARC0);
1607 /* set the speed mode bit, we'll clear it if we're not at
1608 * gigabit link later */
1610 E1000_WRITE_REG(hw, TARC0, tarc);
1611 } else if (hw->mac_type == e1000_80003es2lan) {
1612 tarc = E1000_READ_REG(hw, TARC0);
1614 E1000_WRITE_REG(hw, TARC0, tarc);
1615 tarc = E1000_READ_REG(hw, TARC1);
1617 E1000_WRITE_REG(hw, TARC1, tarc);
1620 e1000_config_collision_dist(hw);
1622 /* Setup Transmit Descriptor Settings for eop descriptor */
1623 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1625 /* only set IDE if we are delaying interrupts using the timers */
1626 if (adapter->tx_int_delay)
1627 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1629 if (hw->mac_type < e1000_82543)
1630 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1632 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1634 /* Cache if we're 82544 running in PCI-X because we'll
1635 * need this to apply a workaround later in the send path. */
1636 if (hw->mac_type == e1000_82544 &&
1637 hw->bus_type == e1000_bus_type_pcix)
1638 adapter->pcix_82544 = 1;
1640 E1000_WRITE_REG(hw, TCTL, tctl);
1645 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1646 * @adapter: board private structure
1647 * @rxdr: rx descriptor ring (for a specific queue) to setup
1649 * Returns 0 on success, negative on failure
1653 e1000_setup_rx_resources(struct e1000_adapter *adapter,
1654 struct e1000_rx_ring *rxdr)
1656 struct pci_dev *pdev = adapter->pdev;
1659 size = sizeof(struct e1000_buffer) * rxdr->count;
1660 rxdr->buffer_info = vmalloc(size);
1661 if (!rxdr->buffer_info) {
1663 "Unable to allocate memory for the receive descriptor ring\n");
1666 memset(rxdr->buffer_info, 0, size);
1668 size = sizeof(struct e1000_ps_page) * rxdr->count;
1669 rxdr->ps_page = kmalloc(size, GFP_KERNEL);
1670 if (!rxdr->ps_page) {
1671 vfree(rxdr->buffer_info);
1673 "Unable to allocate memory for the receive descriptor ring\n");
1676 memset(rxdr->ps_page, 0, size);
1678 size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
1679 rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
1680 if (!rxdr->ps_page_dma) {
1681 vfree(rxdr->buffer_info);
1682 kfree(rxdr->ps_page);
1684 "Unable to allocate memory for the receive descriptor ring\n");
1687 memset(rxdr->ps_page_dma, 0, size);
1689 if (adapter->hw.mac_type <= e1000_82547_rev_2)
1690 desc_len = sizeof(struct e1000_rx_desc);
1692 desc_len = sizeof(union e1000_rx_desc_packet_split);
1694 /* Round up to nearest 4K */
1696 rxdr->size = rxdr->count * desc_len;
1697 E1000_ROUNDUP(rxdr->size, 4096);
1699 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1703 "Unable to allocate memory for the receive descriptor ring\n");
1705 vfree(rxdr->buffer_info);
1706 kfree(rxdr->ps_page);
1707 kfree(rxdr->ps_page_dma);
1711 /* Fix for errata 23, can't cross 64kB boundary */
1712 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1713 void *olddesc = rxdr->desc;
1714 dma_addr_t olddma = rxdr->dma;
1715 DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
1716 "at %p\n", rxdr->size, rxdr->desc);
1717 /* Try again, without freeing the previous */
1718 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
1719 /* Failed allocation, critical failure */
1721 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1723 "Unable to allocate memory "
1724 "for the receive descriptor ring\n");
1725 goto setup_rx_desc_die;
1728 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1730 pci_free_consistent(pdev, rxdr->size, rxdr->desc,
1732 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1734 "Unable to allocate aligned memory "
1735 "for the receive descriptor ring\n");
1736 goto setup_rx_desc_die;
1738 /* Free old allocation, new allocation was successful */
1739 pci_free_consistent(pdev, rxdr->size, olddesc, olddma);
1742 memset(rxdr->desc, 0, rxdr->size);
1744 rxdr->next_to_clean = 0;
1745 rxdr->next_to_use = 0;
1751 * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1752 * (Descriptors) for all queues
1753 * @adapter: board private structure
1755 * Return 0 on success, negative on failure
1759 e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1763 for (i = 0; i < adapter->num_rx_queues; i++) {
1764 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1767 "Allocation for Rx Queue %u failed\n", i);
1768 for (i-- ; i >= 0; i--)
1769 e1000_free_rx_resources(adapter,
1770 &adapter->rx_ring[i]);
1779 * e1000_setup_rctl - configure the receive control registers
1780 * @adapter: Board private structure
1782 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
1783 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
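/* Example, assuming 4 KB pages: PAGE_USE_COUNT(9000) = (9000 >> 12) + 1 = 3,
 * while a standard 1500 byte MTU needs only a single page. */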
1785 e1000_setup_rctl(struct e1000_adapter *adapter)
1787 uint32_t rctl, rfctl;
1788 uint32_t psrctl = 0;
1789 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1793 rctl = E1000_READ_REG(&adapter->hw, RCTL);
1795 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1797 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1798 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1799 (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
1801 if (adapter->hw.tbi_compatibility_on == 1)
1802 rctl |= E1000_RCTL_SBP;
1804 rctl &= ~E1000_RCTL_SBP;
1806 if (adapter->netdev->mtu <= ETH_DATA_LEN)
1807 rctl &= ~E1000_RCTL_LPE;
1809 rctl |= E1000_RCTL_LPE;
1811 /* Setup buffer sizes */
1812 rctl &= ~E1000_RCTL_SZ_4096;
1813 rctl |= E1000_RCTL_BSEX;
1814 switch (adapter->rx_buffer_len) {
1815 case E1000_RXBUFFER_256:
1816 rctl |= E1000_RCTL_SZ_256;
1817 rctl &= ~E1000_RCTL_BSEX;
1819 case E1000_RXBUFFER_512:
1820 rctl |= E1000_RCTL_SZ_512;
1821 rctl &= ~E1000_RCTL_BSEX;
1823 case E1000_RXBUFFER_1024:
1824 rctl |= E1000_RCTL_SZ_1024;
1825 rctl &= ~E1000_RCTL_BSEX;
1827 case E1000_RXBUFFER_2048:
1829 rctl |= E1000_RCTL_SZ_2048;
1830 rctl &= ~E1000_RCTL_BSEX;
1832 case E1000_RXBUFFER_4096:
1833 rctl |= E1000_RCTL_SZ_4096;
1835 case E1000_RXBUFFER_8192:
1836 rctl |= E1000_RCTL_SZ_8192;
1838 case E1000_RXBUFFER_16384:
1839 rctl |= E1000_RCTL_SZ_16384;
1843 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1844 /* 82571 and greater support packet-split where the protocol
1845 * header is placed in skb->data and the packet data is
1846 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
1847 * In the case of a non-split, skb->data is linearly filled,
1848 * followed by the page buffers. Therefore, skb->data is
1849 * sized to hold the largest protocol header.
1851 /* allocations using alloc_page take too long for regular MTU
1852 * so only enable packet split for jumbo frames */
1853 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
1854 if ((adapter->hw.mac_type >= e1000_82571) && (pages <= 3) &&
1855 PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
1856 adapter->rx_ps_pages = pages;
1858 adapter->rx_ps_pages = 0;
1860 if (adapter->rx_ps_pages) {
1861 /* Configure extra packet-split registers */
1862 rfctl = E1000_READ_REG(&adapter->hw, RFCTL);
1863 rfctl |= E1000_RFCTL_EXTEN;
1864 /* disable packet split support for IPv6 extension headers,
1865 * because some malformed IPv6 headers can hang the RX */
1866 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
1867 E1000_RFCTL_NEW_IPV6_EXT_DIS);
1869 E1000_WRITE_REG(&adapter->hw, RFCTL, rfctl);
1871 rctl |= E1000_RCTL_DTYP_PS;
1873 psrctl |= adapter->rx_ps_bsize0 >>
1874 E1000_PSRCTL_BSIZE0_SHIFT;
1876 switch (adapter->rx_ps_pages) {
1878 psrctl |= PAGE_SIZE <<
1879 E1000_PSRCTL_BSIZE3_SHIFT;
1881 psrctl |= PAGE_SIZE <<
1882 E1000_PSRCTL_BSIZE2_SHIFT;
1884 psrctl |= PAGE_SIZE >>
1885 E1000_PSRCTL_BSIZE1_SHIFT;
1889 E1000_WRITE_REG(&adapter->hw, PSRCTL, psrctl);
1892 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
1896 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1897 * @adapter: board private structure
1899 * Configure the Rx unit of the MAC after a reset.
1903 e1000_configure_rx(struct e1000_adapter *adapter)
1906 struct e1000_hw *hw = &adapter->hw;
1907 uint32_t rdlen, rctl, rxcsum, ctrl_ext;
1909 if (adapter->rx_ps_pages) {
1910 /* this is a 32 byte descriptor */
1911 rdlen = adapter->rx_ring[0].count *
1912 sizeof(union e1000_rx_desc_packet_split);
1913 adapter->clean_rx = e1000_clean_rx_irq_ps;
1914 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
1916 rdlen = adapter->rx_ring[0].count *
1917 sizeof(struct e1000_rx_desc);
1918 adapter->clean_rx = e1000_clean_rx_irq;
1919 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1922 /* disable receives while setting up the descriptors */
1923 rctl = E1000_READ_REG(hw, RCTL);
1924 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
1926 /* set the Receive Delay Timer Register */
1927 E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
1929 if (hw->mac_type >= e1000_82540) {
1930 E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
1931 if (adapter->itr_setting != 0)
1932 E1000_WRITE_REG(hw, ITR,
1933 1000000000 / (adapter->itr * 256));
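/* The ITR register counts in 256 ns units, so e.g. an itr value of 8000
 * interrupts/sec (a common default) is programmed as
 * 1000000000 / (8000 * 256) = 488. */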
1936 if (hw->mac_type >= e1000_82571) {
1937 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
1938 /* Reset delay timers after every interrupt */
1939 ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
1940 #ifdef CONFIG_E1000_NAPI
1941 /* Auto-Mask interrupts upon ICR access */
1942 ctrl_ext |= E1000_CTRL_EXT_IAME;
1943 E1000_WRITE_REG(hw, IAM, 0xffffffff);
1945 E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
1946 E1000_WRITE_FLUSH(hw);
1949 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1950 * the Base and Length of the Rx Descriptor Ring */
1951 switch (adapter->num_rx_queues) {
1954 rdba = adapter->rx_ring[0].dma;
1955 E1000_WRITE_REG(hw, RDLEN, rdlen);
1956 E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
1957 E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
1958 E1000_WRITE_REG(hw, RDT, 0);
1959 E1000_WRITE_REG(hw, RDH, 0);
1960 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? E1000_RDH : E1000_82542_RDH);
1961 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? E1000_RDT : E1000_82542_RDT);
1965 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1966 if (hw->mac_type >= e1000_82543) {
1967 rxcsum = E1000_READ_REG(hw, RXCSUM);
1968 if (adapter->rx_csum == TRUE) {
1969 rxcsum |= E1000_RXCSUM_TUOFL;
1971 /* Enable 82571 IPv4 payload checksum for UDP fragments
1972 * Must be used in conjunction with packet-split. */
1973 if ((hw->mac_type >= e1000_82571) &&
1974 (adapter->rx_ps_pages)) {
1975 rxcsum |= E1000_RXCSUM_IPPCSE;
1978 rxcsum &= ~E1000_RXCSUM_TUOFL;
1979 /* don't need to clear IPPCSE as it defaults to 0 */
1981 E1000_WRITE_REG(hw, RXCSUM, rxcsum);
1984 /* enable early receives on 82573, only takes effect if using > 2048
1985 * byte total frame size, i.e. only with jumbo frames */
1986 #define E1000_ERT_2048 0x100
1987 if (hw->mac_type == e1000_82573)
1988 E1000_WRITE_REG(hw, ERT, E1000_ERT_2048);
1990 /* Enable Receives */
1991 E1000_WRITE_REG(hw, RCTL, rctl);
1995 * e1000_free_tx_resources - Free Tx Resources per Queue
1996 * @adapter: board private structure
1997 * @tx_ring: Tx descriptor ring for a specific queue
1999 * Free all transmit software resources
2003 e1000_free_tx_resources(struct e1000_adapter *adapter,
2004 struct e1000_tx_ring *tx_ring)
2006 struct pci_dev *pdev = adapter->pdev;
2008 e1000_clean_tx_ring(adapter, tx_ring);
2010 vfree(tx_ring->buffer_info);
2011 tx_ring->buffer_info = NULL;
2013 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
2015 tx_ring->desc = NULL;
2019 * e1000_free_all_tx_resources - Free Tx Resources for All Queues
2020 * @adapter: board private structure
2022 * Free all transmit software resources
2026 e1000_free_all_tx_resources(struct e1000_adapter *adapter)
2030 for (i = 0; i < adapter->num_tx_queues; i++)
2031 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
2035 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
2036 struct e1000_buffer *buffer_info)
2038 if (buffer_info->dma) {
2039 pci_unmap_page(adapter->pdev,
2041 buffer_info->length,
2043 buffer_info->dma = 0;
2045 if (buffer_info->skb) {
2046 dev_kfree_skb_any(buffer_info->skb);
2047 buffer_info->skb = NULL;
2049 /* buffer_info must be completely set up in the transmit path */
2053 * e1000_clean_tx_ring - Free Tx Buffers
2054 * @adapter: board private structure
2055 * @tx_ring: ring to be cleaned
2059 e1000_clean_tx_ring(struct e1000_adapter *adapter,
2060 struct e1000_tx_ring *tx_ring)
2062 struct e1000_buffer *buffer_info;
2066 /* Free all the Tx ring sk_buffs */
2068 for (i = 0; i < tx_ring->count; i++) {
2069 buffer_info = &tx_ring->buffer_info[i];
2070 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2073 size = sizeof(struct e1000_buffer) * tx_ring->count;
2074 memset(tx_ring->buffer_info, 0, size);
2076 /* Zero out the descriptor ring */
2078 memset(tx_ring->desc, 0, tx_ring->size);
2080 tx_ring->next_to_use = 0;
2081 tx_ring->next_to_clean = 0;
2082 tx_ring->last_tx_tso = 0;
2084 writel(0, adapter->hw.hw_addr + tx_ring->tdh);
2085 writel(0, adapter->hw.hw_addr + tx_ring->tdt);
2089 * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2090 * @adapter: board private structure
2094 e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2098 for (i = 0; i < adapter->num_tx_queues; i++)
2099 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2103 * e1000_free_rx_resources - Free Rx Resources
2104 * @adapter: board private structure
2105 * @rx_ring: ring to clean the resources from
2107 * Free all receive software resources
2111 e1000_free_rx_resources(struct e1000_adapter *adapter,
2112 struct e1000_rx_ring *rx_ring)
2114 struct pci_dev *pdev = adapter->pdev;
2116 e1000_clean_rx_ring(adapter, rx_ring);
2118 vfree(rx_ring->buffer_info);
2119 rx_ring->buffer_info = NULL;
2120 kfree(rx_ring->ps_page);
2121 rx_ring->ps_page = NULL;
2122 kfree(rx_ring->ps_page_dma);
2123 rx_ring->ps_page_dma = NULL;
2125 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
2127 rx_ring->desc = NULL;
2131 * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2132 * @adapter: board private structure
2134 * Free all receive software resources
2138 e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2142 for (i = 0; i < adapter->num_rx_queues; i++)
2143 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2147 * e1000_clean_rx_ring - Free Rx Buffers per Queue
2148 * @adapter: board private structure
2149 * @rx_ring: ring to free buffers from
2153 e1000_clean_rx_ring(struct e1000_adapter *adapter,
2154 struct e1000_rx_ring *rx_ring)
2156 struct e1000_buffer *buffer_info;
2157 struct e1000_ps_page *ps_page;
2158 struct e1000_ps_page_dma *ps_page_dma;
2159 struct pci_dev *pdev = adapter->pdev;
2163 /* Free all the Rx ring sk_buffs */
2164 for (i = 0; i < rx_ring->count; i++) {
2165 buffer_info = &rx_ring->buffer_info[i];
2166 if (buffer_info->skb) {
2167 pci_unmap_single(pdev,
2169 buffer_info->length,
2170 PCI_DMA_FROMDEVICE);
2172 dev_kfree_skb(buffer_info->skb);
2173 buffer_info->skb = NULL;
2175 ps_page = &rx_ring->ps_page[i];
2176 ps_page_dma = &rx_ring->ps_page_dma[i];
2177 for (j = 0; j < adapter->rx_ps_pages; j++) {
2178 if (!ps_page->ps_page[j]) break;
2179 pci_unmap_page(pdev,
2180 ps_page_dma->ps_page_dma[j],
2181 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2182 ps_page_dma->ps_page_dma[j] = 0;
2183 put_page(ps_page->ps_page[j]);
2184 ps_page->ps_page[j] = NULL;
2188 size = sizeof(struct e1000_buffer) * rx_ring->count;
2189 memset(rx_ring->buffer_info, 0, size);
2190 size = sizeof(struct e1000_ps_page) * rx_ring->count;
2191 memset(rx_ring->ps_page, 0, size);
2192 size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
2193 memset(rx_ring->ps_page_dma, 0, size);
2195 /* Zero out the descriptor ring */
2197 memset(rx_ring->desc, 0, rx_ring->size);
2199 rx_ring->next_to_clean = 0;
2200 rx_ring->next_to_use = 0;
2202 writel(0, adapter->hw.hw_addr + rx_ring->rdh);
2203 writel(0, adapter->hw.hw_addr + rx_ring->rdt);
2207 * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2208 * @adapter: board private structure
2212 e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2216 for (i = 0; i < adapter->num_rx_queues; i++)
2217 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2220 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2221 * and memory write and invalidate disabled for certain operations
2224 e1000_enter_82542_rst(struct e1000_adapter *adapter)
2226 struct net_device *netdev = adapter->netdev;
2229 e1000_pci_clear_mwi(&adapter->hw);
2231 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2232 rctl |= E1000_RCTL_RST;
2233 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2234 E1000_WRITE_FLUSH(&adapter->hw);
2237 if (netif_running(netdev))
2238 e1000_clean_all_rx_rings(adapter);
2242 e1000_leave_82542_rst(struct e1000_adapter *adapter)
2244 struct net_device *netdev = adapter->netdev;
2247 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2248 rctl &= ~E1000_RCTL_RST;
2249 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2250 E1000_WRITE_FLUSH(&adapter->hw);
2253 if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
2254 e1000_pci_set_mwi(&adapter->hw);
2256 if (netif_running(netdev)) {
2257 /* No need to loop, because 82542 supports only 1 queue */
2258 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2259 e1000_configure_rx(adapter);
2260 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2265 * e1000_set_mac - Change the Ethernet Address of the NIC
2266 * @netdev: network interface device structure
2267 * @p: pointer to an address structure
2269 * Returns 0 on success, negative on failure
2273 e1000_set_mac(struct net_device *netdev, void *p)
2275 struct e1000_adapter *adapter = netdev_priv(netdev);
2276 struct sockaddr *addr = p;
2278 if (!is_valid_ether_addr(addr->sa_data))
2279 return -EADDRNOTAVAIL;
2281 /* 82542 2.0 needs to be in reset to write receive address registers */
2283 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2284 e1000_enter_82542_rst(adapter);
2286 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2287 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
2289 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
2291 /* With 82571 controllers, LAA may be overwritten (with the default)
2292 * due to controller reset from the other port. */
2293 if (adapter->hw.mac_type == e1000_82571) {
2294 /* activate the workaround */
2295 adapter->hw.laa_is_present = 1;
2297 /* Hold a copy of the LAA in RAR[14].  This is done so that
2298 * between the time RAR[0] gets clobbered and the time it
2299 * gets fixed (in e1000_watchdog), the actual LAA is in one
2300 * of the RARs and no incoming packets directed to this port
2301 * are dropped.  Eventually the LAA will be in RAR[0] and
2302 * RAR[14]. */
2303 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr,
2304 E1000_RAR_ENTRIES - 1);
2307 if (adapter->hw.mac_type == e1000_82542_rev2_0)
2308 e1000_leave_82542_rst(adapter);
2314 * e1000_set_multi - Multicast and Promiscuous mode set
2315 * @netdev: network interface device structure
2317 * The set_multi entry point is called whenever the multicast address
2318 * list or the network interface flags are updated. This routine is
2319 * responsible for configuring the hardware for proper multicast,
2320 * promiscuous mode, and all-multi behavior.
2324 e1000_set_multi(struct net_device *netdev)
2326 struct e1000_adapter *adapter = netdev_priv(netdev);
2327 struct e1000_hw *hw = &adapter->hw;
2328 struct dev_mc_list *mc_ptr;
2330 uint32_t hash_value;
2331 int i, rar_entries = E1000_RAR_ENTRIES;
2332 int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
2333 E1000_NUM_MTA_REGISTERS_ICH8LAN :
2334 E1000_NUM_MTA_REGISTERS;
2336 if (adapter->hw.mac_type == e1000_ich8lan)
2337 rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
2339 /* reserve RAR[14] for LAA over-write work-around */
2340 if (adapter->hw.mac_type == e1000_82571)
2343 /* Check for Promiscuous and All Multicast modes */
2345 rctl = E1000_READ_REG(hw, RCTL);
2347 if (netdev->flags & IFF_PROMISC) {
2348 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2349 } else if (netdev->flags & IFF_ALLMULTI) {
2350 rctl |= E1000_RCTL_MPE;
2351 rctl &= ~E1000_RCTL_UPE;
2353 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
2356 E1000_WRITE_REG(hw, RCTL, rctl);
2358 /* 82542 2.0 needs to be in reset to write receive address registers */
2360 if (hw->mac_type == e1000_82542_rev2_0)
2361 e1000_enter_82542_rst(adapter);
2363 /* load the first 14 multicast addresses into the exact filters 1-14
2364 * RAR 0 is used for the station MAC address
2365 * if there are not 14 addresses, go ahead and clear the filters
2366 * -- with 82571 controllers only 0-13 entries are filled here
2367 */
2368 mc_ptr = netdev->mc_list;
2370 for (i = 1; i < rar_entries; i++) {
2372 e1000_rar_set(hw, mc_ptr->dmi_addr, i);
2373 mc_ptr = mc_ptr->next;
2375 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2376 E1000_WRITE_FLUSH(hw);
2377 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2378 E1000_WRITE_FLUSH(hw);
2382 /* clear the old settings from the multicast hash table */
2384 for (i = 0; i < mta_reg_count; i++) {
2385 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
2386 E1000_WRITE_FLUSH(hw);
2389 /* load any remaining addresses into the hash table */
2391 for (; mc_ptr; mc_ptr = mc_ptr->next) {
2392 hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
2393 e1000_mta_set(hw, hash_value);
2396 if (hw->mac_type == e1000_82542_rev2_0)
2397 e1000_leave_82542_rst(adapter);
2400 /* Need to wait a few seconds after link up to get diagnostic information from
2401 * the phy */
2404 e1000_update_phy_info(unsigned long data)
2406 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2407 e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2411 * e1000_82547_tx_fifo_stall - Timer Call-back
2412 * @data: pointer to adapter cast into an unsigned long
2416 e1000_82547_tx_fifo_stall(unsigned long data)
2418 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2419 struct net_device *netdev = adapter->netdev;
2422 if (atomic_read(&adapter->tx_fifo_stall)) {
2423 if ((E1000_READ_REG(&adapter->hw, TDT) ==
2424 E1000_READ_REG(&adapter->hw, TDH)) &&
2425 (E1000_READ_REG(&adapter->hw, TDFT) ==
2426 E1000_READ_REG(&adapter->hw, TDFH)) &&
2427 (E1000_READ_REG(&adapter->hw, TDFTS) ==
2428 E1000_READ_REG(&adapter->hw, TDFHS))) {
2429 tctl = E1000_READ_REG(&adapter->hw, TCTL);
2430 E1000_WRITE_REG(&adapter->hw, TCTL,
2431 tctl & ~E1000_TCTL_EN);
2432 E1000_WRITE_REG(&adapter->hw, TDFT,
2433 adapter->tx_head_addr);
2434 E1000_WRITE_REG(&adapter->hw, TDFH,
2435 adapter->tx_head_addr);
2436 E1000_WRITE_REG(&adapter->hw, TDFTS,
2437 adapter->tx_head_addr);
2438 E1000_WRITE_REG(&adapter->hw, TDFHS,
2439 adapter->tx_head_addr);
2440 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
2441 E1000_WRITE_FLUSH(&adapter->hw);
2443 adapter->tx_fifo_head = 0;
2444 atomic_set(&adapter->tx_fifo_stall, 0);
2445 netif_wake_queue(netdev);
2447 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
2453 * e1000_watchdog - Timer Call-back
2454 * @data: pointer to adapter cast into an unsigned long
2457 e1000_watchdog(unsigned long data)
2459 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2460 struct net_device *netdev = adapter->netdev;
2461 struct e1000_tx_ring *txdr = adapter->tx_ring;
2462 uint32_t link, tctl;
2465 ret_val = e1000_check_for_link(&adapter->hw);
2466 if ((ret_val == E1000_ERR_PHY) &&
2467 (adapter->hw.phy_type == e1000_phy_igp_3) &&
2468 (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2469 /* See e1000_kumeran_lock_loss_workaround() */
2471 "Gigabit has been disabled, downgrading speed\n");
2474 if (adapter->hw.mac_type == e1000_82573) {
2475 e1000_enable_tx_pkt_filtering(&adapter->hw);
2476 if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
2477 e1000_update_mng_vlan(adapter);
2480 if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
2481 !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
2482 link = !adapter->hw.serdes_link_down;
2484 link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
2487 if (!netif_carrier_ok(netdev)) {
2488 boolean_t txb2b = 1;
2489 e1000_get_speed_and_duplex(&adapter->hw,
2490 &adapter->link_speed,
2491 &adapter->link_duplex);
2493 DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
2494 adapter->link_speed,
2495 adapter->link_duplex == FULL_DUPLEX ?
2496 "Full Duplex" : "Half Duplex");
2498 /* tweak tx_queue_len according to speed/duplex
2499 * and adjust the timeout factor */
2500 netdev->tx_queue_len = adapter->tx_queue_len;
2501 adapter->tx_timeout_factor = 1;
2502 switch (adapter->link_speed) {
2505 netdev->tx_queue_len = 10;
2506 adapter->tx_timeout_factor = 8;
2510 netdev->tx_queue_len = 100;
2511 /* maybe add some timeout factor ? */
2515 if ((adapter->hw.mac_type == e1000_82571 ||
2516 adapter->hw.mac_type == e1000_82572) &&
2519 tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
2520 tarc0 &= ~(1 << 21);
2521 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
2525 /* disable TSO for pcie and 10/100 speeds, to avoid
2526 * some hardware issues */
2527 if (!adapter->tso_force &&
2528 adapter->hw.bus_type == e1000_bus_type_pci_express){
2529 switch (adapter->link_speed) {
2533 "10/100 speed: disabling TSO\n");
2534 netdev->features &= ~NETIF_F_TSO;
2536 netdev->features &= ~NETIF_F_TSO6;
2540 netdev->features |= NETIF_F_TSO;
2542 netdev->features |= NETIF_F_TSO6;
2552 /* enable transmits in the hardware, need to do this
2553 * after setting TARC0 */
2554 tctl = E1000_READ_REG(&adapter->hw, TCTL);
2555 tctl |= E1000_TCTL_EN;
2556 E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
2558 netif_carrier_on(netdev);
2559 netif_wake_queue(netdev);
2560 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
2561 adapter->smartspeed = 0;
2564 if (netif_carrier_ok(netdev)) {
2565 adapter->link_speed = 0;
2566 adapter->link_duplex = 0;
2567 DPRINTK(LINK, INFO, "NIC Link is Down\n");
2568 netif_carrier_off(netdev);
2569 netif_stop_queue(netdev);
2570 mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
2572 /* 80003ES2LAN workaround--
2573 * For packet buffer work-around on link down event;
2574 * disable receives in the ISR and
2575 * reset device here in the watchdog
2577 if (adapter->hw.mac_type == e1000_80003es2lan)
2579 schedule_work(&adapter->reset_task);
2582 e1000_smartspeed(adapter);
2585 e1000_update_stats(adapter);
2587 adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2588 adapter->tpt_old = adapter->stats.tpt;
2589 adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
2590 adapter->colc_old = adapter->stats.colc;
2592 adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2593 adapter->gorcl_old = adapter->stats.gorcl;
2594 adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2595 adapter->gotcl_old = adapter->stats.gotcl;
2597 e1000_update_adaptive(&adapter->hw);
2599 if (!netif_carrier_ok(netdev)) {
2600 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2601 /* We've lost link, so the controller stops DMA,
2602 * but we've got queued Tx work that's never going
2603 * to get done, so reset controller to flush Tx.
2604 * (Do the reset outside of interrupt context). */
2605 adapter->tx_timeout_count++;
2606 schedule_work(&adapter->reset_task);
2610 /* Cause software interrupt to ensure rx ring is cleaned */
2611 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
2613 /* Force detection of hung controller every watchdog period */
2614 adapter->detect_tx_hung = TRUE;
2616 /* With 82571 controllers, LAA may be overwritten due to controller
2617 * reset from the other port. Set the appropriate LAA in RAR[0] */
2618 if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
2619 e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
2621 /* Reset the timer */
2622 mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
2625 enum latency_range {
2629 latency_invalid = 255
2633 * e1000_update_itr - update the dynamic ITR value based on statistics
2634 * Stores a new ITR value based on packets and byte
2635 * counts during the last interrupt. The advantage of per interrupt
2636 * computation is faster updates and more accurate ITR for the current
2637 * traffic pattern. Constants in this function were computed
2638 * based on theoretical maximum wire speed and thresholds were set based
2639 * on testing data as well as attempting to minimize response time
2640 * while increasing bulk throughput.
2641 * This functionality is controlled by the InterruptThrottleRate module
2642 * parameter (see e1000_param.c).
2643 * @adapter: pointer to adapter
2644 * @itr_setting: current adapter->itr
2645 * @packets: the number of packets during this measurement interval
2646 * @bytes: the number of bytes during this measurement interval
2648 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2649 uint16_t itr_setting,
2653 unsigned int retval = itr_setting;
2654 struct e1000_hw *hw = &adapter->hw;
2656 if (unlikely(hw->mac_type < e1000_82540))
2657 goto update_itr_done;
2660 goto update_itr_done;
2662 switch (itr_setting) {
2663 case lowest_latency:
2664 /* jumbo frames get bulk treatment */
2665 if (bytes/packets > 8000)
2666 retval = bulk_latency;
2667 else if ((packets < 5) && (bytes > 512))
2668 retval = low_latency;
2670 case low_latency: /* 50 usec aka 20000 ints/s */
2671 if (bytes > 10000) {
2672 /* jumbo frames need bulk latency setting */
2673 if (bytes/packets > 8000)
2674 retval = bulk_latency;
2675 else if ((packets < 10) || ((bytes/packets) > 1200))
2676 retval = bulk_latency;
2677 else if ((packets > 35))
2678 retval = lowest_latency;
2679 } else if (bytes/packets > 2000)
2680 retval = bulk_latency;
2681 else if (packets <= 2 && bytes < 512)
2682 retval = lowest_latency;
2684 case bulk_latency: /* 250 usec aka 4000 ints/s */
2685 if (bytes > 25000) {
2687 retval = low_latency;
2688 } else if (bytes < 6000) {
2689 retval = low_latency;
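/* Worked example (added for clarity; assumes the thresholds above): in the
 * low_latency state, an interval with 40 packets totalling 20000 bytes gives
 * bytes/packets = 500, so none of the bulk conditions match and packets > 35
 * promotes the ring to lowest_latency; the same byte count spread over only
 * 8 packets (2500 bytes/packet) would instead fall back to bulk_latency. */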
2698 static void e1000_set_itr(struct e1000_adapter *adapter)
2700 struct e1000_hw *hw = &adapter->hw;
2701 uint16_t current_itr;
2702 uint32_t new_itr = adapter->itr;
2704 if (unlikely(hw->mac_type < e1000_82540))
2707 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2708 if (unlikely(adapter->link_speed != SPEED_1000)) {
2714 adapter->tx_itr = e1000_update_itr(adapter,
2716 adapter->total_tx_packets,
2717 adapter->total_tx_bytes);
2718 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2719 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2720 adapter->tx_itr = low_latency;
2722 adapter->rx_itr = e1000_update_itr(adapter,
2724 adapter->total_rx_packets,
2725 adapter->total_rx_bytes);
2726 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2727 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2728 adapter->rx_itr = low_latency;
2730 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2732 switch (current_itr) {
2733 /* counts and packets in update_itr are dependent on these numbers */
2734 case lowest_latency:
2738 new_itr = 20000; /* aka hwitr = ~200 */
2748 if (new_itr != adapter->itr) {
2749 /* this attempts to bias the interrupt rate towards Bulk
2750 * by adding intermediate steps when interrupt rate is
2751 * increasing */
2752 new_itr = new_itr > adapter->itr ?
2753 min(adapter->itr + (new_itr >> 2), new_itr) :
2754 new_itr;
2755 adapter->itr = new_itr;
2756 E1000_WRITE_REG(hw, ITR, 1000000000 / (new_itr * 256));
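/* Editor's note (added; illustrative only): the min() step above smooths
 * increases.  Going from adapter->itr = 8000 to a computed target of 20000
 * ints/s yields min(8000 + (20000 >> 2), 20000) = 13000 on this pass, so the
 * interrupt rate ramps up over several intervals rather than jumping. */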
2762 #define E1000_TX_FLAGS_CSUM 0x00000001
2763 #define E1000_TX_FLAGS_VLAN 0x00000002
2764 #define E1000_TX_FLAGS_TSO 0x00000004
2765 #define E1000_TX_FLAGS_IPV4 0x00000008
2766 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
2767 #define E1000_TX_FLAGS_VLAN_SHIFT 16
2770 e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2771 struct sk_buff *skb)
2774 struct e1000_context_desc *context_desc;
2775 struct e1000_buffer *buffer_info;
2777 uint32_t cmd_length = 0;
2778 uint16_t ipcse = 0, tucse, mss;
2779 uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
2782 if (skb_is_gso(skb)) {
2783 if (skb_header_cloned(skb)) {
2784 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2789 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
2790 mss = skb_shinfo(skb)->gso_size;
2791 if (skb->protocol == htons(ETH_P_IP)) {
2792 skb->nh.iph->tot_len = 0;
2793 skb->nh.iph->check = 0;
2795 ~csum_tcpudp_magic(skb->nh.iph->saddr,
2800 cmd_length = E1000_TXD_CMD_IP;
2801 ipcse = skb->h.raw - skb->data - 1;
2803 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2804 skb->nh.ipv6h->payload_len = 0;
2806 ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
2807 &skb->nh.ipv6h->daddr,
2814 ipcss = skb->nh.raw - skb->data;
2815 ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
2816 tucss = skb->h.raw - skb->data;
2817 tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
2820 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2821 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2823 i = tx_ring->next_to_use;
2824 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2825 buffer_info = &tx_ring->buffer_info[i];
2827 context_desc->lower_setup.ip_fields.ipcss = ipcss;
2828 context_desc->lower_setup.ip_fields.ipcso = ipcso;
2829 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
2830 context_desc->upper_setup.tcp_fields.tucss = tucss;
2831 context_desc->upper_setup.tcp_fields.tucso = tucso;
2832 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2833 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
2834 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2835 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2837 buffer_info->time_stamp = jiffies;
2838 buffer_info->next_to_watch = i;
2840 if (++i == tx_ring->count) i = 0;
2841 tx_ring->next_to_use = i;
2851 e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2852 struct sk_buff *skb)
2854 struct e1000_context_desc *context_desc;
2855 struct e1000_buffer *buffer_info;
2859 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2860 css = skb->h.raw - skb->data;
2862 i = tx_ring->next_to_use;
2863 buffer_info = &tx_ring->buffer_info[i];
2864 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2866 context_desc->upper_setup.tcp_fields.tucss = css;
2867 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
2868 context_desc->upper_setup.tcp_fields.tucse = 0;
2869 context_desc->tcp_seg_setup.data = 0;
2870 context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
2872 buffer_info->time_stamp = jiffies;
2873 buffer_info->next_to_watch = i;
2875 if (unlikely(++i == tx_ring->count)) i = 0;
2876 tx_ring->next_to_use = i;
2884 #define E1000_MAX_TXD_PWR 12
2885 #define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR)
2888 e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2889 struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
2890 unsigned int nr_frags, unsigned int mss)
2892 struct e1000_buffer *buffer_info;
2893 unsigned int len = skb->len;
2894 unsigned int offset = 0, size, count = 0, i;
2896 len -= skb->data_len;
2898 i = tx_ring->next_to_use;
2901 buffer_info = &tx_ring->buffer_info[i];
2902 size = min(len, max_per_txd);
2904 /* Workaround for Controller erratum --
2905 * descriptor for non-tso packet in a linear SKB that follows a
2906 * tso gets written back prematurely before the data is fully
2907 * DMA'd to the controller */
2908 if (!skb->data_len && tx_ring->last_tx_tso &&
2910 tx_ring->last_tx_tso = 0;
2914 /* Workaround for premature desc write-backs
2915 * in TSO mode. Append 4-byte sentinel desc */
2916 if (unlikely(mss && !nr_frags && size == len && size > 8))
2919 /* work-around for errata 10 and it applies
2920 * to all controllers in PCI-X mode
2921 * The fix is to make sure that the first descriptor of a
2922 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
2924 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
2925 (size > 2015) && count == 0))
2928 /* Workaround for potential 82544 hang in PCI-X. Avoid
2929 * terminating buffers within evenly-aligned dwords. */
2930 if (unlikely(adapter->pcix_82544 &&
2931 !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2935 buffer_info->length = size;
2937 pci_map_single(adapter->pdev,
2941 buffer_info->time_stamp = jiffies;
2942 buffer_info->next_to_watch = i;
2947 if (unlikely(++i == tx_ring->count)) i = 0;
2950 for (f = 0; f < nr_frags; f++) {
2951 struct skb_frag_struct *frag;
2953 frag = &skb_shinfo(skb)->frags[f];
2955 offset = frag->page_offset;
2958 buffer_info = &tx_ring->buffer_info[i];
2959 size = min(len, max_per_txd);
2961 /* Workaround for premature desc write-backs
2962 * in TSO mode. Append 4-byte sentinel desc */
2963 if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2966 /* Workaround for potential 82544 hang in PCI-X.
2967 * Avoid terminating buffers within evenly-aligned
2969 if (unlikely(adapter->pcix_82544 &&
2970 !((unsigned long)(frag->page+offset+size-1) & 4) &&
2974 buffer_info->length = size;
2976 pci_map_page(adapter->pdev,
2981 buffer_info->time_stamp = jiffies;
2982 buffer_info->next_to_watch = i;
2987 if (unlikely(++i == tx_ring->count)) i = 0;
2991 i = (i == 0) ? tx_ring->count - 1 : i - 1;
2992 tx_ring->buffer_info[i].skb = skb;
2993 tx_ring->buffer_info[first].next_to_watch = i;
2999 e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3000 int tx_flags, int count)
3002 struct e1000_tx_desc *tx_desc = NULL;
3003 struct e1000_buffer *buffer_info;
3004 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3007 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
3008 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3010 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3012 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3013 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3016 if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3017 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3018 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3021 if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3022 txd_lower |= E1000_TXD_CMD_VLE;
3023 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3026 i = tx_ring->next_to_use;
3029 buffer_info = &tx_ring->buffer_info[i];
3030 tx_desc = E1000_TX_DESC(*tx_ring, i);
3031 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3032 tx_desc->lower.data =
3033 cpu_to_le32(txd_lower | buffer_info->length);
3034 tx_desc->upper.data = cpu_to_le32(txd_upper);
3035 if (unlikely(++i == tx_ring->count)) i = 0;
3038 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3040 /* Force memory writes to complete before letting h/w
3041 * know there are new descriptors to fetch. (Only
3042 * applicable for weak-ordered memory model archs,
3043 * such as IA-64). */
3046 tx_ring->next_to_use = i;
3047 writel(i, adapter->hw.hw_addr + tx_ring->tdt);
3048 /* we need this if more than one processor can write to our tail
3049 * at a time; it synchronizes IO on IA64/Altix systems */
3054 * 82547 workaround to avoid controller hang in half-duplex environment.
3055 * The workaround is to avoid queuing a large packet that would span
3056 * the internal Tx FIFO ring boundary by notifying the stack to resend
3057 * the packet at a later time. This gives the Tx FIFO an opportunity to
3058 * flush all packets. When that occurs, we reset the Tx FIFO pointers
3059 * to the beginning of the Tx FIFO.
3062 #define E1000_FIFO_HDR 0x10
3063 #define E1000_82547_PAD_LEN 0x3E0
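/* Worked example (added for clarity; not in the original source): for a
 * 1514-byte frame, skb_fifo_len = 1514 + E1000_FIFO_HDR (16) = 1530, rounded
 * up to a multiple of 16 gives 1536.  A stall is then signalled when
 * 1536 >= E1000_82547_PAD_LEN (0x3E0 = 992) + remaining FIFO space, i.e.
 * whenever 544 bytes or less of head-room would remain in the Tx FIFO. */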
3066 e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
3068 uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3069 uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
3071 E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
3073 if (adapter->link_duplex != HALF_DUPLEX)
3074 goto no_fifo_stall_required;
3076 if (atomic_read(&adapter->tx_fifo_stall))
3079 if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3080 atomic_set(&adapter->tx_fifo_stall, 1);
3084 no_fifo_stall_required:
3085 adapter->tx_fifo_head += skb_fifo_len;
3086 if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3087 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3091 #define MINIMUM_DHCP_PACKET_SIZE 282
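/* Editor's note (added; presumed derivation of the constant): 282 bytes is
 * the smallest frame that can carry a DHCP message:
 *   14 (Ethernet header) + 20 (IPv4 header) + 8 (UDP header)
 *   + 240 (fixed BOOTP fields plus the 4-byte magic cookie) = 282. */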
3093 e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
3095 struct e1000_hw *hw = &adapter->hw;
3096 uint16_t length, offset;
3097 if (vlan_tx_tag_present(skb)) {
3098 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
3099 ( adapter->hw.mng_cookie.status &
3100 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) )
3103 if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
3104 struct ethhdr *eth = (struct ethhdr *) skb->data;
3105 if ((htons(ETH_P_IP) == eth->h_proto)) {
3106 const struct iphdr *ip =
3107 (struct iphdr *)((uint8_t *)skb->data+14);
3108 if (IPPROTO_UDP == ip->protocol) {
3109 struct udphdr *udp =
3110 (struct udphdr *)((uint8_t *)ip +
3112 if (ntohs(udp->dest) == 67) {
3113 offset = (uint8_t *)udp + 8 - skb->data;
3114 length = skb->len - offset;
3116 return e1000_mng_write_dhcp_info(hw,
3126 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3128 struct e1000_adapter *adapter = netdev_priv(netdev);
3129 struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3131 netif_stop_queue(netdev);
3132 /* Herbert's original patch had:
3133 * smp_mb__after_netif_stop_queue();
3134 * but since that doesn't exist yet, just open code it. */
3137 /* We need to check again in case another CPU has just
3138 * made room available. */
3139 if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3143 netif_start_queue(netdev);
3144 ++adapter->restart_queue;
3148 static int e1000_maybe_stop_tx(struct net_device *netdev,
3149 struct e1000_tx_ring *tx_ring, int size)
3151 if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3153 return __e1000_maybe_stop_tx(netdev, size);
3156 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
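/* Illustrative example (added; not in the original source): with
 * E1000_MAX_TXD_PWR = 12 each descriptor maps at most 4096 bytes, so
 * TXD_USE_COUNT(9000, 12) = (9000 >> 12) + 1 = 2 + 1 = 3 descriptors are
 * budgeted for a 9000-byte buffer. */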
3158 e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3160 struct e1000_adapter *adapter = netdev_priv(netdev);
3161 struct e1000_tx_ring *tx_ring;
3162 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3163 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3164 unsigned int tx_flags = 0;
3165 unsigned int len = skb->len;
3166 unsigned long flags;
3167 unsigned int nr_frags = 0;
3168 unsigned int mss = 0;
3172 len -= skb->data_len;
3174 /* This goes back to the question of how to logically map a tx queue
3175 * to a flow. Right now, performance is impacted slightly negatively
3176 * if using multiple tx queues. If the stack breaks away from a
3177 * single qdisc implementation, we can look at this again. */
3178 tx_ring = adapter->tx_ring;
3180 if (unlikely(skb->len <= 0)) {
3181 dev_kfree_skb_any(skb);
3182 return NETDEV_TX_OK;
3185 /* 82571 and newer doesn't need the workaround that limited descriptor
3187 if (adapter->hw.mac_type >= e1000_82571)
3191 mss = skb_shinfo(skb)->gso_size;
3192 /* The controller does a simple calculation to
3193 * make sure there is enough room in the FIFO before
3194 * initiating the DMA for each buffer. The calc is:
3195 * 4 = ceil(buffer len/mss). To make sure we don't
3196 * overrun the FIFO, adjust the max buffer len if mss
3197 * drops. */
3200 max_per_txd = min(mss << 2, max_per_txd);
3201 max_txd_pwr = fls(max_per_txd) - 1;
3203 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
3204 * points to just header, pull a few bytes of payload from
3205 * frags into skb->data */
3206 hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
3207 if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
3208 switch (adapter->hw.mac_type) {
3209 unsigned int pull_size;
3214 pull_size = min((unsigned int)4, skb->data_len);
3215 if (!__pskb_pull_tail(skb, pull_size)) {
3217 "__pskb_pull_tail failed.\n");
3218 dev_kfree_skb_any(skb);
3219 return NETDEV_TX_OK;
3221 len = skb->len - skb->data_len;
3230 /* reserve a descriptor for the offload context */
3231 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3235 if (skb->ip_summed == CHECKSUM_PARTIAL)
3240 /* Controller Erratum workaround */
3241 if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3245 count += TXD_USE_COUNT(len, max_txd_pwr);
3247 if (adapter->pcix_82544)
3250 /* work-around for errata 10 and it applies to all controllers
3251 * in PCI-X mode, so add one more descriptor to the count
3253 if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
3257 nr_frags = skb_shinfo(skb)->nr_frags;
3258 for (f = 0; f < nr_frags; f++)
3259 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
3261 if (adapter->pcix_82544)
3265 if (adapter->hw.tx_pkt_filtering &&
3266 (adapter->hw.mac_type == e1000_82573))
3267 e1000_transfer_dhcp_info(adapter, skb);
3269 local_irq_save(flags);
3270 if (!spin_trylock(&tx_ring->tx_lock)) {
3271 /* Collision - tell upper layer to requeue */
3272 local_irq_restore(flags);
3273 return NETDEV_TX_LOCKED;
3276 /* need: count + 2 desc gap to keep tail from touching
3277 * head, otherwise try next time */
3278 if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
3279 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3280 return NETDEV_TX_BUSY;
3283 if (unlikely(adapter->hw.mac_type == e1000_82547)) {
3284 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
3285 netif_stop_queue(netdev);
3286 mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
3287 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3288 return NETDEV_TX_BUSY;
3292 if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
3293 tx_flags |= E1000_TX_FLAGS_VLAN;
3294 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3297 first = tx_ring->next_to_use;
3299 tso = e1000_tso(adapter, tx_ring, skb);
3301 dev_kfree_skb_any(skb);
3302 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3303 return NETDEV_TX_OK;
3307 tx_ring->last_tx_tso = 1;
3308 tx_flags |= E1000_TX_FLAGS_TSO;
3309 } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3310 tx_flags |= E1000_TX_FLAGS_CSUM;
3312 /* The old method assumed an IPv4 packet by default whenever TSO was
3313 * enabled.  82571 hardware supports TSO for IPv6 as well, so we can no
3314 * longer assume IPv4; check the protocol explicitly. */
3315 if (likely(skb->protocol == htons(ETH_P_IP)))
3316 tx_flags |= E1000_TX_FLAGS_IPV4;
3318 e1000_tx_queue(adapter, tx_ring, tx_flags,
3319 e1000_tx_map(adapter, tx_ring, skb, first,
3320 max_per_txd, nr_frags, mss));
3322 netdev->trans_start = jiffies;
3324 /* Make sure there is space in the ring for the next send. */
3325 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3327 spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
3328 return NETDEV_TX_OK;
3332 * e1000_tx_timeout - Respond to a Tx Hang
3333 * @netdev: network interface device structure
3337 e1000_tx_timeout(struct net_device *netdev)
3339 struct e1000_adapter *adapter = netdev_priv(netdev);
3341 /* Do the reset outside of interrupt context */
3342 adapter->tx_timeout_count++;
3343 schedule_work(&adapter->reset_task);
3347 e1000_reset_task(struct work_struct *work)
3349 struct e1000_adapter *adapter =
3350 container_of(work, struct e1000_adapter, reset_task);
3352 e1000_reinit_locked(adapter);
3356 * e1000_get_stats - Get System Network Statistics
3357 * @netdev: network interface device structure
3359 * Returns the address of the device statistics structure.
3360 * The statistics are actually updated from the timer callback.
3363 static struct net_device_stats *
3364 e1000_get_stats(struct net_device *netdev)
3366 struct e1000_adapter *adapter = netdev_priv(netdev);
3368 /* only return the current stats */
3369 return &adapter->net_stats;
3373 * e1000_change_mtu - Change the Maximum Transfer Unit
3374 * @netdev: network interface device structure
3375 * @new_mtu: new value for maximum frame size
3377 * Returns 0 on success, negative on failure
3381 e1000_change_mtu(struct net_device *netdev, int new_mtu)
3383 struct e1000_adapter *adapter = netdev_priv(netdev);
3384 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3385 uint16_t eeprom_data = 0;
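/* Editor's note (added; assumes the usual 14-byte header and 4-byte FCS
 * constants): a standard 1500-byte MTU works out to
 * 1500 + ENET_HEADER_SIZE (14) + ETHERNET_FCS_SIZE (4) = 1518 bytes,
 * the classic maximum Ethernet frame size. */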
3387 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3388 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3389 DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
3393 /* Adapter-specific max frame size limits. */
3394 switch (adapter->hw.mac_type) {
3395 case e1000_undefined ... e1000_82542_rev2_1:
3397 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
3398 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
3403 /* Jumbo Frames not supported if:
3404 * - this is not an 82573L device
3405 * - ASPM is enabled in any way (0x1A bits 3:2) */
3406 e1000_read_eeprom(&adapter->hw, EEPROM_INIT_3GIO_3, 1,
3408 if ((adapter->hw.device_id != E1000_DEV_ID_82573L) ||
3409 (eeprom_data & EEPROM_WORD1A_ASPM_MASK)) {
3410 if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
3412 "Jumbo Frames not supported.\n");
3417 /* ERT will be enabled later to enable wire speed receives */
3419 /* fall through to get support */
3422 case e1000_80003es2lan:
3423 #define MAX_STD_JUMBO_FRAME_SIZE 9234
3424 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3425 DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
3430 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3434 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3435 * means we reserve 2 more, this pushes us to allocate from the next
3436 * larger slab size
3437 * i.e. RXBUFFER_2048 --> size-4096 slab */
3439 if (max_frame <= E1000_RXBUFFER_256)
3440 adapter->rx_buffer_len = E1000_RXBUFFER_256;
3441 else if (max_frame <= E1000_RXBUFFER_512)
3442 adapter->rx_buffer_len = E1000_RXBUFFER_512;
3443 else if (max_frame <= E1000_RXBUFFER_1024)
3444 adapter->rx_buffer_len = E1000_RXBUFFER_1024;
3445 else if (max_frame <= E1000_RXBUFFER_2048)
3446 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3447 else if (max_frame <= E1000_RXBUFFER_4096)
3448 adapter->rx_buffer_len = E1000_RXBUFFER_4096;
3449 else if (max_frame <= E1000_RXBUFFER_8192)
3450 adapter->rx_buffer_len = E1000_RXBUFFER_8192;
3451 else if (max_frame <= E1000_RXBUFFER_16384)
3452 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3454 /* adjust allocation if LPE protects us, and we aren't using SBP */
3455 if (!adapter->hw.tbi_compatibility_on &&
3456 ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
3457 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3458 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3460 netdev->mtu = new_mtu;
3462 if (netif_running(netdev))
3463 e1000_reinit_locked(adapter);
3465 adapter->hw.max_frame_size = max_frame;
3471 * e1000_update_stats - Update the board statistics counters
3472 * @adapter: board private structure
3476 e1000_update_stats(struct e1000_adapter *adapter)
3478 struct e1000_hw *hw = &adapter->hw;
3479 struct pci_dev *pdev = adapter->pdev;
3480 unsigned long flags;
3483 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3486 * Prevent stats update while adapter is being reset, or if the pci
3487 * connection is down.
3489 if (adapter->link_speed == 0)
3491 if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
3494 spin_lock_irqsave(&adapter->stats_lock, flags);
3496 /* these counters are modified from e1000_adjust_tbi_stats,
3497 * called from the interrupt context, so they must only
3498 * be written while holding adapter->stats_lock
3501 adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
3502 adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
3503 adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
3504 adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
3505 adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
3506 adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
3507 adapter->stats.roc += E1000_READ_REG(hw, ROC);
3509 if (adapter->hw.mac_type != e1000_ich8lan) {
3510 adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
3511 adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
3512 adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
3513 adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
3514 adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
3515 adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);
3518 adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
3519 adapter->stats.mpc += E1000_READ_REG(hw, MPC);
3520 adapter->stats.scc += E1000_READ_REG(hw, SCC);
3521 adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
3522 adapter->stats.mcc += E1000_READ_REG(hw, MCC);
3523 adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
3524 adapter->stats.dc += E1000_READ_REG(hw, DC);
3525 adapter->stats.sec += E1000_READ_REG(hw, SEC);
3526 adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
3527 adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
3528 adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
3529 adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
3530 adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
3531 adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
3532 adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
3533 adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
3534 adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
3535 adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
3536 adapter->stats.ruc += E1000_READ_REG(hw, RUC);
3537 adapter->stats.rfc += E1000_READ_REG(hw, RFC);
3538 adapter->stats.rjc += E1000_READ_REG(hw, RJC);
3539 adapter->stats.torl += E1000_READ_REG(hw, TORL);
3540 adapter->stats.torh += E1000_READ_REG(hw, TORH);
3541 adapter->stats.totl += E1000_READ_REG(hw, TOTL);
3542 adapter->stats.toth += E1000_READ_REG(hw, TOTH);
3543 adapter->stats.tpr += E1000_READ_REG(hw, TPR);
3545 if (adapter->hw.mac_type != e1000_ich8lan) {
3546 adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
3547 adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
3548 adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
3549 adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
3550 adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
3551 adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
3554 adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
3555 adapter->stats.bptc += E1000_READ_REG(hw, BPTC);
3557 /* used for adaptive IFS */
3559 hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
3560 adapter->stats.tpt += hw->tx_packet_delta;
3561 hw->collision_delta = E1000_READ_REG(hw, COLC);
3562 adapter->stats.colc += hw->collision_delta;
3564 if (hw->mac_type >= e1000_82543) {
3565 adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
3566 adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
3567 adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
3568 adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
3569 adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
3570 adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
3572 if (hw->mac_type > e1000_82547_rev_2) {
3573 adapter->stats.iac += E1000_READ_REG(hw, IAC);
3574 adapter->stats.icrxoc += E1000_READ_REG(hw, ICRXOC);
3576 if (adapter->hw.mac_type != e1000_ich8lan) {
3577 adapter->stats.icrxptc += E1000_READ_REG(hw, ICRXPTC);
3578 adapter->stats.icrxatc += E1000_READ_REG(hw, ICRXATC);
3579 adapter->stats.ictxptc += E1000_READ_REG(hw, ICTXPTC);
3580 adapter->stats.ictxatc += E1000_READ_REG(hw, ICTXATC);
3581 adapter->stats.ictxqec += E1000_READ_REG(hw, ICTXQEC);
3582 adapter->stats.ictxqmtc += E1000_READ_REG(hw, ICTXQMTC);
3583 adapter->stats.icrxdmtc += E1000_READ_REG(hw, ICRXDMTC);
3587 /* Fill out the OS statistics structure */
3588 adapter->net_stats.rx_packets = adapter->stats.gprc;
3589 adapter->net_stats.tx_packets = adapter->stats.gptc;
3590 adapter->net_stats.rx_bytes = adapter->stats.gorcl;
3591 adapter->net_stats.tx_bytes = adapter->stats.gotcl;
3592 adapter->net_stats.multicast = adapter->stats.mprc;
3593 adapter->net_stats.collisions = adapter->stats.colc;
3597 /* RLEC on some newer hardware can be incorrect so build
3598 * our own version based on RUC and ROC */
3599 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
3600 adapter->stats.crcerrs + adapter->stats.algnerrc +
3601 adapter->stats.ruc + adapter->stats.roc +
3602 adapter->stats.cexterr;
3603 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3604 adapter->net_stats.rx_length_errors = adapter->stats.rlerrc;
3605 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
3606 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
3607 adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
3610 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3611 adapter->net_stats.tx_errors = adapter->stats.txerrc;
3612 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
3613 adapter->net_stats.tx_window_errors = adapter->stats.latecol;
3614 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;
3615 if (adapter->hw.bad_tx_carr_stats_fd &&
3616 adapter->link_duplex == FULL_DUPLEX) {
3617 adapter->net_stats.tx_carrier_errors = 0;
3618 adapter->stats.tncrs = 0;
3621 /* Tx Dropped needs to be maintained elsewhere */
3624 if (hw->media_type == e1000_media_type_copper) {
3625 if ((adapter->link_speed == SPEED_1000) &&
3626 (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3627 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3628 adapter->phy_stats.idle_errors += phy_tmp;
3631 if ((hw->mac_type <= e1000_82546) &&
3632 (hw->phy_type == e1000_phy_m88) &&
3633 !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3634 adapter->phy_stats.receive_errors += phy_tmp;
3637 spin_unlock_irqrestore(&adapter->stats_lock, flags);
3639 #ifdef CONFIG_PCI_MSI
3642 * e1000_intr_msi - Interrupt Handler
3643 * @irq: interrupt number
3644 * @data: pointer to a network interface device structure
3648 irqreturn_t e1000_intr_msi(int irq, void *data)
3650 struct net_device *netdev = data;
3651 struct e1000_adapter *adapter = netdev_priv(netdev);
3652 struct e1000_hw *hw = &adapter->hw;
3653 #ifndef CONFIG_E1000_NAPI
3657 /* this code avoids the read of ICR but has to get 1000 interrupts
3658 * at every link change event before it will notice the change */
3659 if (++adapter->detect_link >= 1000) {
3660 uint32_t icr = E1000_READ_REG(hw, ICR);
3661 #ifdef CONFIG_E1000_NAPI
3662 /* read ICR disables interrupts using IAM, so keep up with our
3663 * enable/disable accounting */
3664 atomic_inc(&adapter->irq_sem);
3666 adapter->detect_link = 0;
3667 if ((icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) &&
3668 (icr & E1000_ICR_INT_ASSERTED)) {
3669 hw->get_link_status = 1;
3670 /* 80003ES2LAN workaround--
3671 * For packet buffer work-around on link down event;
3672 * disable receives here in the ISR and
3673 * reset adapter in watchdog
3675 if (netif_carrier_ok(netdev) &&
3676 (adapter->hw.mac_type == e1000_80003es2lan)) {
3677 /* disable receives */
3678 uint32_t rctl = E1000_READ_REG(hw, RCTL);
3679 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3681 /* guard against interrupt when we're going down */
3682 if (!test_bit(__E1000_DOWN, &adapter->flags))
3683 mod_timer(&adapter->watchdog_timer,
3687 E1000_WRITE_REG(hw, ICR, (0xffffffff & ~(E1000_ICR_RXSEQ |
3689 /* bummer we have to flush here, but things break otherwise as
3690 * some event appears to be lost or delayed and throughput
3691 * drops. In almost all tests this flush is unnecessary */
3692 E1000_WRITE_FLUSH(hw);
3693 #ifdef CONFIG_E1000_NAPI
3694 /* Interrupt Auto-Mask (IAM)...upon writing ICR, interrupts are
3695 * masked. No need for the IMC write, but it does mean we
3696 * should account for it ASAP. */
3697 atomic_inc(&adapter->irq_sem);
3701 #ifdef CONFIG_E1000_NAPI
3702 if (likely(netif_rx_schedule_prep(netdev))) {
3703 adapter->total_tx_bytes = 0;
3704 adapter->total_tx_packets = 0;
3705 adapter->total_rx_bytes = 0;
3706 adapter->total_rx_packets = 0;
3707 __netif_rx_schedule(netdev);
3709 e1000_irq_enable(adapter);
3711 adapter->total_tx_bytes = 0;
3712 adapter->total_rx_bytes = 0;
3713 adapter->total_tx_packets = 0;
3714 adapter->total_rx_packets = 0;
3716 for (i = 0; i < E1000_MAX_INTR; i++)
3717 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3718 !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3721 if (likely(adapter->itr_setting & 3))
3722 e1000_set_itr(adapter);
3730 * e1000_intr - Interrupt Handler
3731 * @irq: interrupt number
3732 * @data: pointer to a network interface device structure
3736 e1000_intr(int irq, void *data)
3738 struct net_device *netdev = data;
3739 struct e1000_adapter *adapter = netdev_priv(netdev);
3740 struct e1000_hw *hw = &adapter->hw;
3741 uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
3742 #ifndef CONFIG_E1000_NAPI
3746 return IRQ_NONE; /* Not our interrupt */
3748 #ifdef CONFIG_E1000_NAPI
3749 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
3750 * not set, then the adapter didn't send an interrupt */
3751 if (unlikely(hw->mac_type >= e1000_82571 &&
3752 !(icr & E1000_ICR_INT_ASSERTED)))
3755 /* Interrupt Auto-Mask...upon reading ICR,
3756 * interrupts are masked. No need for the
3757 * IMC write, but it does mean we should
3758 * account for it ASAP. */
3759 if (likely(hw->mac_type >= e1000_82571))
3760 atomic_inc(&adapter->irq_sem);
3763 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3764 hw->get_link_status = 1;
3765 /* 80003ES2LAN workaround--
3766 * For packet buffer work-around on link down event;
3767 * disable receives here in the ISR and
3768 * reset adapter in watchdog
3770 if (netif_carrier_ok(netdev) &&
3771 (adapter->hw.mac_type == e1000_80003es2lan)) {
3772 /* disable receives */
3773 rctl = E1000_READ_REG(hw, RCTL);
3774 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3776 /* guard against interrupt when we're going down */
3777 if (!test_bit(__E1000_DOWN, &adapter->flags))
3778 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3781 #ifdef CONFIG_E1000_NAPI
3782 if (unlikely(hw->mac_type < e1000_82571)) {
3783 /* disable interrupts, without the synchronize_irq bit */
3784 atomic_inc(&adapter->irq_sem);
3785 E1000_WRITE_REG(hw, IMC, ~0);
3786 E1000_WRITE_FLUSH(hw);
3788 if (likely(netif_rx_schedule_prep(netdev))) {
3789 adapter->total_tx_bytes = 0;
3790 adapter->total_tx_packets = 0;
3791 adapter->total_rx_bytes = 0;
3792 adapter->total_rx_packets = 0;
3793 __netif_rx_schedule(netdev);
3795 /* this really should not happen! if it does it is basically a
3796 * bug, but not a hard error, so enable ints and continue */
3797 e1000_irq_enable(adapter);
3799 /* Writing IMC and IMS is needed for 82547.
3800 * Due to Hub Link bus being occupied, an interrupt
3801 * de-assertion message is not able to be sent.
3802 * When an interrupt assertion message is generated later,
3803 * two messages are re-ordered and sent out.
3804 * That causes APIC to think 82547 is in de-assertion
3805 * state, while 82547 is in assertion state, resulting
3806 * in a deadlock. Writing IMC forces 82547 into
3807 * de-assertion state.
3809 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
3810 atomic_inc(&adapter->irq_sem);
3811 E1000_WRITE_REG(hw, IMC, ~0);
3814 adapter->total_tx_bytes = 0;
3815 adapter->total_rx_bytes = 0;
3816 adapter->total_tx_packets = 0;
3817 adapter->total_rx_packets = 0;
3819 for (i = 0; i < E1000_MAX_INTR; i++)
3820 if (unlikely(!adapter->clean_rx(adapter, adapter->rx_ring) &
3821 !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
3824 if (likely(adapter->itr_setting & 3))
3825 e1000_set_itr(adapter);
3827 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
3828 e1000_irq_enable(adapter);
3834 #ifdef CONFIG_E1000_NAPI
3836 * e1000_clean - NAPI Rx polling callback
3837 * @adapter: board private structure
3841 e1000_clean(struct net_device *poll_dev, int *budget)
3843 struct e1000_adapter *adapter;
3844 int work_to_do = min(*budget, poll_dev->quota);
3845 int tx_cleaned = 0, work_done = 0;
3847 /* Must NOT use netdev_priv macro here. */
3848 adapter = poll_dev->priv;
3850 /* Keep link state information with original netdev */
3851 if (!netif_carrier_ok(poll_dev))
3854 /* e1000_clean is called per-cpu. This lock protects
3855 * tx_ring[0] from being cleaned by multiple cpus
3856 * simultaneously. A failure obtaining the lock means
3857 * tx_ring[0] is currently being cleaned anyway. */
3858 if (spin_trylock(&adapter->tx_queue_lock)) {
3859 tx_cleaned = e1000_clean_tx_irq(adapter,
3860 &adapter->tx_ring[0]);
3861 spin_unlock(&adapter->tx_queue_lock);
3864 adapter->clean_rx(adapter, &adapter->rx_ring[0],
3865 &work_done, work_to_do);
3867 *budget -= work_done;
3868 poll_dev->quota -= work_done;
3870 /* If no Tx and not enough Rx work done, exit the polling mode */
3871 if ((!tx_cleaned && (work_done == 0)) ||
3872 !netif_running(poll_dev)) {
3874 if (likely(adapter->itr_setting & 3))
3875 e1000_set_itr(adapter);
3876 netif_rx_complete(poll_dev);
3877 e1000_irq_enable(adapter);
3886 * e1000_clean_tx_irq - Reclaim resources after transmit completes
3887 * @adapter: board private structure
3891 e1000_clean_tx_irq(struct e1000_adapter *adapter,
3892 struct e1000_tx_ring *tx_ring)
3894 struct net_device *netdev = adapter->netdev;
3895 struct e1000_tx_desc *tx_desc, *eop_desc;
3896 struct e1000_buffer *buffer_info;
3897 unsigned int i, eop;
3898 #ifdef CONFIG_E1000_NAPI
3899 unsigned int count = 0;
3901 boolean_t cleaned = FALSE;
3902 unsigned int total_tx_bytes=0, total_tx_packets=0;
3904 i = tx_ring->next_to_clean;
3905 eop = tx_ring->buffer_info[i].next_to_watch;
3906 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3908 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
3909 for (cleaned = FALSE; !cleaned; ) {
3910 tx_desc = E1000_TX_DESC(*tx_ring, i);
3911 buffer_info = &tx_ring->buffer_info[i];
3912 cleaned = (i == eop);
3915 struct sk_buff *skb = buffer_info->skb;
3916 unsigned int segs = skb_shinfo(skb)->gso_segs;
3917 total_tx_packets += segs;
3919 total_tx_bytes += skb->len;
3921 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3922 tx_desc->upper.data = 0;
3924 if (unlikely(++i == tx_ring->count)) i = 0;
3927 eop = tx_ring->buffer_info[i].next_to_watch;
3928 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3929 #ifdef CONFIG_E1000_NAPI
3930 #define E1000_TX_WEIGHT 64
3931 /* weight of a sort for tx, to avoid endless transmit cleanup */
3932 if (count++ == E1000_TX_WEIGHT) break;
3936 tx_ring->next_to_clean = i;
3938 #define TX_WAKE_THRESHOLD 32
3939 if (unlikely(cleaned && netif_carrier_ok(netdev) &&
3940 E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3941 /* Make sure that anybody stopping the queue after this
3942 * sees the new next_to_clean.
3945 if (netif_queue_stopped(netdev)) {
3946 netif_wake_queue(netdev);
3947 ++adapter->restart_queue;
3951 if (adapter->detect_tx_hung) {
3952 /* Detect a transmit hang in hardware, this serializes the
3953 * check with the clearing of time_stamp and movement of i */
3954 adapter->detect_tx_hung = FALSE;
3955 if (tx_ring->buffer_info[eop].dma &&
3956 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3957 (adapter->tx_timeout_factor * HZ))
3958 && !(E1000_READ_REG(&adapter->hw, STATUS) &
3959 E1000_STATUS_TXOFF)) {
3961 /* detected Tx unit hang */
3962 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
3966 " next_to_use <%x>\n"
3967 " next_to_clean <%x>\n"
3968 "buffer_info[next_to_clean]\n"
3969 " time_stamp <%lx>\n"
3970 " next_to_watch <%x>\n"
3972 " next_to_watch.status <%x>\n",
3973 (unsigned long)((tx_ring - adapter->tx_ring) /
3974 sizeof(struct e1000_tx_ring)),
3975 readl(adapter->hw.hw_addr + tx_ring->tdh),
3976 readl(adapter->hw.hw_addr + tx_ring->tdt),
3977 tx_ring->next_to_use,
3978 tx_ring->next_to_clean,
3979 tx_ring->buffer_info[eop].time_stamp,
3982 eop_desc->upper.fields.status);
3983 netif_stop_queue(netdev);
3986 adapter->total_tx_bytes += total_tx_bytes;
3987 adapter->total_tx_packets += total_tx_packets;
3992 * e1000_rx_checksum - Receive Checksum Offload for 82543
3993 * @adapter: board private structure
3994 * @status_err: receive descriptor status and error fields
3995 * @csum: receive descriptor csum field
3996 * @sk_buff: socket buffer with received data
4000 e1000_rx_checksum(struct e1000_adapter *adapter,
4001 uint32_t status_err, uint32_t csum,
4002 struct sk_buff *skb)
4004 uint16_t status = (uint16_t)status_err;
4005 uint8_t errors = (uint8_t)(status_err >> 24);
4006 skb->ip_summed = CHECKSUM_NONE;
4008 /* 82543 or newer only */
4009 if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
4010 /* Ignore Checksum bit is set */
4011 if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
4012 /* TCP/UDP checksum error bit is set */
4013 if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
4014 /* let the stack verify checksum errors */
4015 adapter->hw_csum_err++;
4018 /* TCP/UDP Checksum has not been calculated */
4019 if (adapter->hw.mac_type <= e1000_82547_rev_2) {
4020 if (!(status & E1000_RXD_STAT_TCPCS))
4023 if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
4026 /* It must be a TCP or UDP packet with a valid checksum */
4027 if (likely(status & E1000_RXD_STAT_TCPCS)) {
4028 /* TCP checksum is good */
4029 skb->ip_summed = CHECKSUM_UNNECESSARY;
4030 } else if (adapter->hw.mac_type > e1000_82547_rev_2) {
4031 /* IP fragment with UDP payload */
4032 /* Hardware complements the payload checksum, so we undo it
4033 * and then put the value in host order for further stack use.
4035 csum = ntohl(csum ^ 0xFFFF);
4037 skb->ip_summed = CHECKSUM_COMPLETE;
4039 adapter->hw_csum_good++;
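/* Editor's note: an illustrative sketch, not part of the original driver.
 * For a UDP payload carried in an IP fragment the hardware reports the
 * ones' complement of the payload checksum, which the code above undoes by
 * XORing with 0xFFFF before handing the sum to the stack.  The same step
 * in isolation: */
static inline uint16_t e1000_example_undo_csum_complement(uint16_t hw_csum)
{
	/* e.g. a descriptor value of 0xE3B9 yields the raw sum 0x1C46 */
	return hw_csum ^ 0xFFFF;
}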
4043 * e1000_clean_rx_irq - Send received data up the network stack; legacy
4044 * @adapter: board private structure
4048 #ifdef CONFIG_E1000_NAPI
4049 e1000_clean_rx_irq(struct e1000_adapter *adapter,
4050 struct e1000_rx_ring *rx_ring,
4051 int *work_done, int work_to_do)
4053 e1000_clean_rx_irq(struct e1000_adapter *adapter,
4054 struct e1000_rx_ring *rx_ring)
4057 struct net_device *netdev = adapter->netdev;
4058 struct pci_dev *pdev = adapter->pdev;
4059 struct e1000_rx_desc *rx_desc, *next_rxd;
4060 struct e1000_buffer *buffer_info, *next_buffer;
4061 unsigned long flags;
4065 int cleaned_count = 0;
4066 boolean_t cleaned = FALSE;
4067 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4069 i = rx_ring->next_to_clean;
4070 rx_desc = E1000_RX_DESC(*rx_ring, i);
4071 buffer_info = &rx_ring->buffer_info[i];
4073 while (rx_desc->status & E1000_RXD_STAT_DD) {
4074 struct sk_buff *skb;
4077 #ifdef CONFIG_E1000_NAPI
4078 if (*work_done >= work_to_do)
4082 status = rx_desc->status;
4083 skb = buffer_info->skb;
4084 buffer_info->skb = NULL;
4086 prefetch(skb->data - NET_IP_ALIGN);
4088 if (++i == rx_ring->count) i = 0;
4089 next_rxd = E1000_RX_DESC(*rx_ring, i);
4092 next_buffer = &rx_ring->buffer_info[i];
4096 pci_unmap_single(pdev,
4098 buffer_info->length,
4099 PCI_DMA_FROMDEVICE);
4101 length = le16_to_cpu(rx_desc->length);
4103 if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
4104 /* All receives must fit into a single buffer */
4105 E1000_DBG("%s: Receive packet consumed multiple"
4106 " buffers\n", netdev->name);
4108 buffer_info->skb = skb;
4112 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4113 last_byte = *(skb->data + length - 1);
4114 if (TBI_ACCEPT(&adapter->hw, status,
4115 rx_desc->errors, length, last_byte)) {
4116 spin_lock_irqsave(&adapter->stats_lock, flags);
4117 e1000_tbi_adjust_stats(&adapter->hw,
4120 spin_unlock_irqrestore(&adapter->stats_lock,
4125 buffer_info->skb = skb;
4130 /* adjust length to remove the Ethernet CRC; this must be
4131 * done after the TBI_ACCEPT workaround above */
4134 /* probably a little skewed due to removing CRC */
4135 total_rx_bytes += length;
4138 /* code added for copybreak; this should improve
4139 * performance for small packets when large amounts
4140 * of reassembly are being done in the stack */
4141 #define E1000_CB_LENGTH 256
4142 if (length < E1000_CB_LENGTH) {
4143 struct sk_buff *new_skb =
4144 netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
4146 skb_reserve(new_skb, NET_IP_ALIGN);
4147 memcpy(new_skb->data - NET_IP_ALIGN,
4148 skb->data - NET_IP_ALIGN,
4149 length + NET_IP_ALIGN);
4150 /* save the skb in buffer_info as good */
4151 buffer_info->skb = skb;
4154 /* else just continue with the old one */
4156 /* end copybreak code */
4157 skb_put(skb, length);
4159 /* Receive Checksum Offload */
4160 e1000_rx_checksum(adapter,
4161 (uint32_t)(status) |
4162 ((uint32_t)(rx_desc->errors) << 24),
4163 le16_to_cpu(rx_desc->csum), skb);
4165 skb->protocol = eth_type_trans(skb, netdev);
4166 #ifdef CONFIG_E1000_NAPI
4167 if (unlikely(adapter->vlgrp &&
4168 (status & E1000_RXD_STAT_VP))) {
4169 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
4170 le16_to_cpu(rx_desc->special) &
4171 E1000_RXD_SPC_VLAN_MASK);
4173 netif_receive_skb(skb);
4175 #else /* CONFIG_E1000_NAPI */
4176 if (unlikely(adapter->vlgrp &&
4177 (status & E1000_RXD_STAT_VP))) {
4178 vlan_hwaccel_rx(skb, adapter->vlgrp,
4179 le16_to_cpu(rx_desc->special) &
4180 E1000_RXD_SPC_VLAN_MASK);
4184 #endif /* CONFIG_E1000_NAPI */
4185 netdev->last_rx = jiffies;
4188 rx_desc->status = 0;
4190 /* return some buffers to hardware; returning one at a time is too slow */
4191 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4192 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4196 /* use prefetched values */
4198 buffer_info = next_buffer;
4200 rx_ring->next_to_clean = i;
4202 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4204 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4206 adapter->total_rx_packets += total_rx_packets;
4207 adapter->total_rx_bytes += total_rx_bytes;
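/* Editor's note: an illustrative sketch, not part of the original driver.
 * The copybreak path above amounts to "small frame: copy it into a
 * right-sized skb so the full-sized ring buffer can be reused".  A
 * hypothetical helper with the same behaviour; E1000_CB_LENGTH,
 * NET_IP_ALIGN and netdev_alloc_skb() are the facilities used above. */
static struct sk_buff *e1000_example_copybreak(struct net_device *netdev,
					       struct sk_buff *ring_skb,
					       unsigned int length)
{
	struct sk_buff *new_skb;

	if (length >= E1000_CB_LENGTH)
		return NULL;	/* large frame: hand the ring buffer up */

	new_skb = netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
	if (!new_skb)
		return NULL;	/* allocation failed: fall back as well */

	skb_reserve(new_skb, NET_IP_ALIGN);
	memcpy(new_skb->data - NET_IP_ALIGN, ring_skb->data - NET_IP_ALIGN,
	       length + NET_IP_ALIGN);
	return new_skb;		/* caller keeps ring_skb for ring reuse */
}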
4212 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
4213 * @adapter: board private structure
4217 #ifdef CONFIG_E1000_NAPI
4218 e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4219 struct e1000_rx_ring *rx_ring,
4220 int *work_done, int work_to_do)
4222 e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4223 struct e1000_rx_ring *rx_ring)
4226 union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
4227 struct net_device *netdev = adapter->netdev;
4228 struct pci_dev *pdev = adapter->pdev;
4229 struct e1000_buffer *buffer_info, *next_buffer;
4230 struct e1000_ps_page *ps_page;
4231 struct e1000_ps_page_dma *ps_page_dma;
4232 struct sk_buff *skb;
4234 uint32_t length, staterr;
4235 int cleaned_count = 0;
4236 boolean_t cleaned = FALSE;
4237 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4239 i = rx_ring->next_to_clean;
4240 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
4241 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
4242 buffer_info = &rx_ring->buffer_info[i];
4244 while (staterr & E1000_RXD_STAT_DD) {
4245 ps_page = &rx_ring->ps_page[i];
4246 ps_page_dma = &rx_ring->ps_page_dma[i];
4247 #ifdef CONFIG_E1000_NAPI
4248 if (unlikely(*work_done >= work_to_do))
4252 skb = buffer_info->skb;
4254 /* in the packet split case this is header only */
4255 prefetch(skb->data - NET_IP_ALIGN);
4257 if (++i == rx_ring->count) i = 0;
4258 next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
4261 next_buffer = &rx_ring->buffer_info[i];
4265 pci_unmap_single(pdev, buffer_info->dma,
4266 buffer_info->length,
4267 PCI_DMA_FROMDEVICE);
4269 if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
4270 E1000_DBG("%s: Packet Split buffers didn't pick up"
4271 " the full packet\n", netdev->name);
4272 dev_kfree_skb_irq(skb);
4276 if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
4277 dev_kfree_skb_irq(skb);
4281 length = le16_to_cpu(rx_desc->wb.middle.length0);
4283 if (unlikely(!length)) {
4284 E1000_DBG("%s: Last part of the packet spanning"
4285 " multiple descriptors\n", netdev->name);
4286 dev_kfree_skb_irq(skb);
4291 skb_put(skb, length);
4294 /* this looks ugly, but compiler issues seem to make it
4295 more efficient than reusing j */
4296 int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
4298 /* page alloc/put takes too long and affects small packet
4299 * throughput, so unsplit small packets and save the alloc/put */
4300 if (l1 && ((length + l1) <= adapter->rx_ps_bsize0)) {
4302 /* there is no documentation about how to call
4303 * kmap_atomic, so we can't hold the mapping
4304 * very long */
4305 pci_dma_sync_single_for_cpu(pdev,
4306 ps_page_dma->ps_page_dma[0],
4308 PCI_DMA_FROMDEVICE);
4309 vaddr = kmap_atomic(ps_page->ps_page[0],
4310 KM_SKB_DATA_SOFTIRQ);
4311 memcpy(skb->tail, vaddr, l1);
4312 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
4313 pci_dma_sync_single_for_device(pdev,
4314 ps_page_dma->ps_page_dma[0],
4315 PAGE_SIZE, PCI_DMA_FROMDEVICE);
4316 /* remove the CRC */
4323 for (j = 0; j < adapter->rx_ps_pages; j++) {
4324 if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
4326 pci_unmap_page(pdev, ps_page_dma->ps_page_dma[j],
4327 PAGE_SIZE, PCI_DMA_FROMDEVICE);
4328 ps_page_dma->ps_page_dma[j] = 0;
4329 skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
4331 ps_page->ps_page[j] = NULL;
4333 skb->data_len += length;
4334 skb->truesize += length;
4337 /* strip the Ethernet CRC; the problem is we're using pages now, so
4338 * this whole operation can get a little CPU intensive */
4339 pskb_trim(skb, skb->len - 4);
4342 total_rx_bytes += skb->len;
4345 e1000_rx_checksum(adapter, staterr,
4346 le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
4347 skb->protocol = eth_type_trans(skb, netdev);
4349 if (likely(rx_desc->wb.upper.header_status &
4350 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
4351 adapter->rx_hdr_split++;
4352 #ifdef CONFIG_E1000_NAPI
4353 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
4354 vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
4355 le16_to_cpu(rx_desc->wb.middle.vlan) &
4356 E1000_RXD_SPC_VLAN_MASK);
4358 netif_receive_skb(skb);
4360 #else /* CONFIG_E1000_NAPI */
4361 if (unlikely(adapter->vlgrp && (staterr & E1000_RXD_STAT_VP))) {
4362 vlan_hwaccel_rx(skb, adapter->vlgrp,
4363 le16_to_cpu(rx_desc->wb.middle.vlan) &
4364 E1000_RXD_SPC_VLAN_MASK);
4368 #endif /* CONFIG_E1000_NAPI */
4369 netdev->last_rx = jiffies;
4372 rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
4373 buffer_info->skb = NULL;
4375 /* return some buffers to hardware; returning one at a time is too slow */
4376 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4377 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4381 /* use prefetched values */
4383 buffer_info = next_buffer;
4385 staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
4387 rx_ring->next_to_clean = i;
4389 cleaned_count = E1000_DESC_UNUSED(rx_ring);
4391 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4393 adapter->total_rx_packets += total_rx_packets;
4394 adapter->total_rx_bytes += total_rx_bytes;
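/* Editor's note (descriptive, not from the source): in the packet-split
 * path the hardware writes the protocol headers into the small rx_ps_bsize0
 * skb and scatters the payload across up to rx_ps_pages full pages, which
 * are then attached with skb_fill_page_desc().  Frames small enough to fit
 * in the header buffer are instead copied back out of page 0, avoiding the
 * page get/put cost that dominates for short packets. */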
4399 * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4400 * @adapter: address of board private structure
4404 e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4405 struct e1000_rx_ring *rx_ring,
4408 struct net_device *netdev = adapter->netdev;
4409 struct pci_dev *pdev = adapter->pdev;
4410 struct e1000_rx_desc *rx_desc;
4411 struct e1000_buffer *buffer_info;
4412 struct sk_buff *skb;
4414 unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
4416 i = rx_ring->next_to_use;
4417 buffer_info = &rx_ring->buffer_info[i];
4419 while (cleaned_count--) {
4420 skb = buffer_info->skb;
4426 skb = netdev_alloc_skb(netdev, bufsz);
4427 if (unlikely(!skb)) {
4428 /* Better luck next round */
4429 adapter->alloc_rx_buff_failed++;
4433 /* Fix for errata 23, can't cross 64kB boundary */
4434 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4435 struct sk_buff *oldskb = skb;
4436 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
4437 "at %p\n", bufsz, skb->data);
4438 /* Try again, without freeing the previous */
4439 skb = netdev_alloc_skb(netdev, bufsz);
4440 /* Failed allocation, critical failure */
4442 dev_kfree_skb(oldskb);
4446 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4449 dev_kfree_skb(oldskb);
4450 break; /* while !buffer_info->skb */
4453 /* Use new allocation */
4454 dev_kfree_skb(oldskb);
4456 /* Make buffer alignment 2 beyond a 16 byte boundary;
4457 * this will result in a 16 byte aligned IP header after
4458 * the 14 byte MAC header is removed
4459 */
4460 skb_reserve(skb, NET_IP_ALIGN);
4462 buffer_info->skb = skb;
4463 buffer_info->length = adapter->rx_buffer_len;
4465 buffer_info->dma = pci_map_single(pdev,
4467 adapter->rx_buffer_len,
4468 PCI_DMA_FROMDEVICE);
4470 /* Fix for errata 23, can't cross 64kB boundary */
4471 if (!e1000_check_64k_bound(adapter,
4472 (void *)(unsigned long)buffer_info->dma,
4473 adapter->rx_buffer_len)) {
4474 DPRINTK(RX_ERR, ERR,
4475 "dma align check failed: %u bytes at %p\n",
4476 adapter->rx_buffer_len,
4477 (void *)(unsigned long)buffer_info->dma);
4479 buffer_info->skb = NULL;
4481 pci_unmap_single(pdev, buffer_info->dma,
4482 adapter->rx_buffer_len,
4483 PCI_DMA_FROMDEVICE);
4485 break; /* while !buffer_info->skb */
4487 rx_desc = E1000_RX_DESC(*rx_ring, i);
4488 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4490 if (unlikely(++i == rx_ring->count))
4492 buffer_info = &rx_ring->buffer_info[i];
4495 if (likely(rx_ring->next_to_use != i)) {
4496 rx_ring->next_to_use = i;
4497 if (unlikely(i-- == 0))
4498 i = (rx_ring->count - 1);
4500 /* Force memory writes to complete before letting h/w
4501 * know there are new descriptors to fetch. (Only
4502 * applicable for weak-ordered memory model archs,
4503 * such as IA-64). */
4505 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
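/* Editor's note: an illustrative sketch, not part of the original driver.
 * Errata 23 forbids a receive buffer from straddling a 64 KB boundary; the
 * e1000_check_64k_bound() calls above guard against that.  Conceptually the
 * test asks whether the first and last byte fall in the same 64 KB region,
 * for example: */
static inline int e1000_example_crosses_64k(unsigned long start,
					    unsigned long len)
{
	unsigned long end = start + len - 1;

	return (start >> 16) != (end >> 16);	/* 1 if the buffer straddles */
}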
4510 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
4511 * @adapter: address of board private structure
4515 e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
4516 struct e1000_rx_ring *rx_ring,
4519 struct net_device *netdev = adapter->netdev;
4520 struct pci_dev *pdev = adapter->pdev;
4521 union e1000_rx_desc_packet_split *rx_desc;
4522 struct e1000_buffer *buffer_info;
4523 struct e1000_ps_page *ps_page;
4524 struct e1000_ps_page_dma *ps_page_dma;
4525 struct sk_buff *skb;
4528 i = rx_ring->next_to_use;
4529 buffer_info = &rx_ring->buffer_info[i];
4530 ps_page = &rx_ring->ps_page[i];
4531 ps_page_dma = &rx_ring->ps_page_dma[i];
4533 while (cleaned_count--) {
4534 rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
4536 for (j = 0; j < PS_PAGE_BUFFERS; j++) {
4537 if (j < adapter->rx_ps_pages) {
4538 if (likely(!ps_page->ps_page[j])) {
4539 ps_page->ps_page[j] =
4540 alloc_page(GFP_ATOMIC);
4541 if (unlikely(!ps_page->ps_page[j])) {
4542 adapter->alloc_rx_buff_failed++;
4545 ps_page_dma->ps_page_dma[j] =
4547 ps_page->ps_page[j],
4549 PCI_DMA_FROMDEVICE);
4551 /* Refresh the desc even if buffer_addrs didn't
4552 * change because each write-back erases
4553 * this info. */
4555 rx_desc->read.buffer_addr[j+1] =
4556 cpu_to_le64(ps_page_dma->ps_page_dma[j]);
4558 rx_desc->read.buffer_addr[j+1] = ~0;
4561 skb = netdev_alloc_skb(netdev,
4562 adapter->rx_ps_bsize0 + NET_IP_ALIGN);
4564 if (unlikely(!skb)) {
4565 adapter->alloc_rx_buff_failed++;
4569 /* Make buffer alignment 2 beyond a 16 byte boundary;
4570 * this will result in a 16 byte aligned IP header after
4571 * the 14 byte MAC header is removed
4572 */
4573 skb_reserve(skb, NET_IP_ALIGN);
4575 buffer_info->skb = skb;
4576 buffer_info->length = adapter->rx_ps_bsize0;
4577 buffer_info->dma = pci_map_single(pdev, skb->data,
4578 adapter->rx_ps_bsize0,
4579 PCI_DMA_FROMDEVICE);
4581 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
4583 if (unlikely(++i == rx_ring->count)) i = 0;
4584 buffer_info = &rx_ring->buffer_info[i];
4585 ps_page = &rx_ring->ps_page[i];
4586 ps_page_dma = &rx_ring->ps_page_dma[i];
4590 if (likely(rx_ring->next_to_use != i)) {
4591 rx_ring->next_to_use = i;
4592 if (unlikely(i-- == 0)) i = (rx_ring->count - 1);
4594 /* Force memory writes to complete before letting h/w
4595 * know there are new descriptors to fetch. (Only
4596 * applicable for weak-ordered memory model archs,
4597 * such as IA-64). */
4599 /* Hardware increments by 16 bytes, but packet split
4600 * descriptors are 32 bytes...so we increment tail
4601 * twice as much */
4603 writel(i<<1, adapter->hw.hw_addr + rx_ring->rdt);
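/* Editor's note (descriptive, not from the source): because each 32-byte
 * packet-split descriptor occupies two 16-byte slots as far as the tail
 * register is concerned, ring index i is written to RDT as i << 1; for
 * example descriptor 7 is reported to the hardware as tail value 14. */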
4608 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4613 e1000_smartspeed(struct e1000_adapter *adapter)
4615 uint16_t phy_status;
4618 if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
4619 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
4622 if (adapter->smartspeed == 0) {
4623 /* If Master/Slave config fault is asserted twice,
4624 * we assume back-to-back */
4625 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
4626 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4627 e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
4628 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4629 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
4630 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4631 phy_ctrl &= ~CR_1000T_MS_ENABLE;
4632 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
4634 adapter->smartspeed++;
4635 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
4636 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
4638 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4639 MII_CR_RESTART_AUTO_NEG);
4640 e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
4645 } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4646 /* If still no link, perhaps using 2/3 pair cable */
4647 e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
4648 phy_ctrl |= CR_1000T_MS_ENABLE;
4649 e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
4650 if (!e1000_phy_setup_autoneg(&adapter->hw) &&
4651 !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
4652 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4653 MII_CR_RESTART_AUTO_NEG);
4654 e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
4657 /* Restart process after E1000_SMARTSPEED_MAX iterations */
4658 if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4659 adapter->smartspeed = 0;
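/* Editor's note (descriptive, not from the source): adapter->smartspeed is
 * effectively a small counter-driven state machine.  At 0 it watches for
 * two consecutive master/slave configuration faults and then clears manual
 * master/slave mode; at E1000_SMARTSPEED_DOWNSHIFT it re-enables that mode
 * in case a 2- or 3-pair cable is in use; past E1000_SMARTSPEED_MAX it
 * wraps back to 0 so the whole sequence can be retried. */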
4670 e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4676 return e1000_mii_ioctl(netdev, ifr, cmd);
4690 e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4692 struct e1000_adapter *adapter = netdev_priv(netdev);
4693 struct mii_ioctl_data *data = if_mii(ifr);
4697 unsigned long flags;
4699 if (adapter->hw.media_type != e1000_media_type_copper)
4704 data->phy_id = adapter->hw.phy_addr;
4707 if (!capable(CAP_NET_ADMIN))
4709 spin_lock_irqsave(&adapter->stats_lock, flags);
4710 if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
4712 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4715 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4718 if (!capable(CAP_NET_ADMIN))
4720 if (data->reg_num & ~(0x1F))
4722 mii_reg = data->val_in;
4723 spin_lock_irqsave(&adapter->stats_lock, flags);
4724 if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
4726 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4729 if (adapter->hw.media_type == e1000_media_type_copper) {
4730 switch (data->reg_num) {
4732 if (mii_reg & MII_CR_POWER_DOWN)
4734 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4735 adapter->hw.autoneg = 1;
4736 adapter->hw.autoneg_advertised = 0x2F;
4737 } else {
4738 if (mii_reg & 0x40)
4739 spddplx = SPEED_1000;
4740 else if (mii_reg & 0x2000)
4741 spddplx = SPEED_100;
4742 else
4743 spddplx = SPEED_10;
4744 spddplx += (mii_reg & 0x100) ? DUPLEX_FULL : DUPLEX_HALF;
4747 retval = e1000_set_spd_dplx(adapter,
4750 spin_unlock_irqrestore(
4751 &adapter->stats_lock,
4756 if (netif_running(adapter->netdev))
4757 e1000_reinit_locked(adapter);
4759 e1000_reset(adapter);
4761 case M88E1000_PHY_SPEC_CTRL:
4762 case M88E1000_EXT_PHY_SPEC_CTRL:
4763 if (e1000_phy_reset(&adapter->hw)) {
4764 spin_unlock_irqrestore(
4765 &adapter->stats_lock, flags);
4771 switch (data->reg_num) {
4773 if (mii_reg & MII_CR_POWER_DOWN)
4775 if (netif_running(adapter->netdev))
4776 e1000_reinit_locked(adapter);
4778 e1000_reset(adapter);
4782 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4787 return E1000_SUCCESS;
4791 e1000_pci_set_mwi(struct e1000_hw *hw)
4793 struct e1000_adapter *adapter = hw->back;
4794 int ret_val = pci_set_mwi(adapter->pdev);
4797 DPRINTK(PROBE, ERR, "Error in setting MWI\n");
4801 e1000_pci_clear_mwi(struct e1000_hw *hw)
4803 struct e1000_adapter *adapter = hw->back;
4805 pci_clear_mwi(adapter->pdev);
4809 e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
4811 struct e1000_adapter *adapter = hw->back;
4813 pci_read_config_word(adapter->pdev, reg, value);
4817 e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
4819 struct e1000_adapter *adapter = hw->back;
4821 pci_write_config_word(adapter->pdev, reg, *value);
4825 e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
4827 struct e1000_adapter *adapter = hw->back;
4828 uint16_t cap_offset;
4830 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
4832 return -E1000_ERR_CONFIG;
4834 pci_read_config_word(adapter->pdev, cap_offset + reg, value);
4836 return E1000_SUCCESS;
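/* Editor's note, illustrative usage only (the register offset below is an
 * assumption for the example, not a value taken from this driver): a caller
 * wanting the PCI Express Device Control word, which sits 8 bytes into the
 * PCIe capability located by pci_find_capability(), could do:
 *
 *	uint16_t devctl;
 *	e1000_read_pcie_cap_reg(&adapter->hw, 0x08, &devctl);
 */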
4840 e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
4846 e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
4848 struct e1000_adapter *adapter = netdev_priv(netdev);
4849 uint32_t ctrl, rctl;
4851 e1000_irq_disable(adapter);
4852 adapter->vlgrp = grp;
4855 /* enable VLAN tag insert/strip */
4856 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4857 ctrl |= E1000_CTRL_VME;
4858 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
4860 if (adapter->hw.mac_type != e1000_ich8lan) {
4861 /* enable VLAN receive filtering */
4862 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4863 rctl |= E1000_RCTL_VFE;
4864 rctl &= ~E1000_RCTL_CFIEN;
4865 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4866 e1000_update_mng_vlan(adapter);
4869 /* disable VLAN tag insert/strip */
4870 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
4871 ctrl &= ~E1000_CTRL_VME;
4872 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
4874 if (adapter->hw.mac_type != e1000_ich8lan) {
4875 /* disable VLAN filtering */
4876 rctl = E1000_READ_REG(&adapter->hw, RCTL);
4877 rctl &= ~E1000_RCTL_VFE;
4878 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
4879 if (adapter->mng_vlan_id !=
4880 (uint16_t)E1000_MNG_VLAN_NONE) {
4881 e1000_vlan_rx_kill_vid(netdev,
4882 adapter->mng_vlan_id);
4883 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4888 e1000_irq_enable(adapter);
4892 e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
4894 struct e1000_adapter *adapter = netdev_priv(netdev);
4895 uint32_t vfta, index;
4897 if ((adapter->hw.mng_cookie.status &
4898 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4899 (vid == adapter->mng_vlan_id))
4901 /* add VID to filter table */
4902 index = (vid >> 5) & 0x7F;
4903 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
4904 vfta |= (1 << (vid & 0x1F));
4905 e1000_write_vfta(&adapter->hw, index, vfta);
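/* Editor's note: an illustrative sketch, not part of the original driver.
 * The VLAN filter table is 128 32-bit words with one bit per VID; the
 * index/bit arithmetic above maps, for example, VID 2742 to word 85,
 * bit 22.  A hypothetical helper doing the same split: */
static inline void e1000_example_vfta_slot(uint16_t vid, uint32_t *index,
					   uint32_t *mask)
{
	*index = (vid >> 5) & 0x7F;	/* which VFTA word (0..127) */
	*mask = 1 << (vid & 0x1F);	/* which bit within that word */
}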
4909 e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
4911 struct e1000_adapter *adapter = netdev_priv(netdev);
4912 uint32_t vfta, index;
4914 e1000_irq_disable(adapter);
4917 adapter->vlgrp->vlan_devices[vid] = NULL;
4919 e1000_irq_enable(adapter);
4921 if ((adapter->hw.mng_cookie.status &
4922 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4923 (vid == adapter->mng_vlan_id)) {
4924 /* release control to f/w */
4925 e1000_release_hw_control(adapter);
4929 /* remove VID from filter table */
4930 index = (vid >> 5) & 0x7F;
4931 vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
4932 vfta &= ~(1 << (vid & 0x1F));
4933 e1000_write_vfta(&adapter->hw, index, vfta);
4937 e1000_restore_vlan(struct e1000_adapter *adapter)
4939 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
4941 if (adapter->vlgrp) {
4943 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
4944 if (!adapter->vlgrp->vlan_devices[vid])
4946 e1000_vlan_rx_add_vid(adapter->netdev, vid);
4952 e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
4954 adapter->hw.autoneg = 0;
4956 /* Fiber NICs only allow 1000 Mbps Full duplex */
4957 if ((adapter->hw.media_type == e1000_media_type_fiber) &&
4958 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
4959 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
4964 case SPEED_10 + DUPLEX_HALF:
4965 adapter->hw.forced_speed_duplex = e1000_10_half;
4967 case SPEED_10 + DUPLEX_FULL:
4968 adapter->hw.forced_speed_duplex = e1000_10_full;
4970 case SPEED_100 + DUPLEX_HALF:
4971 adapter->hw.forced_speed_duplex = e1000_100_half;
4973 case SPEED_100 + DUPLEX_FULL:
4974 adapter->hw.forced_speed_duplex = e1000_100_full;
4976 case SPEED_1000 + DUPLEX_FULL:
4977 adapter->hw.autoneg = 1;
4978 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
4980 case SPEED_1000 + DUPLEX_HALF: /* not supported */
4982 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
4989 /* Save/restore 16 or 64 dwords of PCI config space depending on which
4990 * bus we're on (PCI(X) vs. PCI-E)
4991 */
4992 #define PCIE_CONFIG_SPACE_LEN 256
4993 #define PCI_CONFIG_SPACE_LEN 64
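/* Editor's note (descriptive, not from the source): the loops below copy
 * size / 4 dwords, i.e. PCIE_CONFIG_SPACE_LEN / 4 = 64 dwords for PCI
 * Express parts and PCI_CONFIG_SPACE_LEN / 4 = 16 dwords otherwise, which
 * is the "16 or 64 dwords" mentioned in the comment above. */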
4995 e1000_pci_save_state(struct e1000_adapter *adapter)
4997 struct pci_dev *dev = adapter->pdev;
5001 if (adapter->hw.mac_type >= e1000_82571)
5002 size = PCIE_CONFIG_SPACE_LEN;
5004 size = PCI_CONFIG_SPACE_LEN;
5006 WARN_ON(adapter->config_space != NULL);
5008 adapter->config_space = kmalloc(size, GFP_KERNEL);
5009 if (!adapter->config_space) {
5010 DPRINTK(PROBE, ERR, "unable to allocate %d bytes\n", size);
5013 for (i = 0; i < (size / 4); i++)
5014 pci_read_config_dword(dev, i * 4, &adapter->config_space[i]);
5019 e1000_pci_restore_state(struct e1000_adapter *adapter)
5021 struct pci_dev *dev = adapter->pdev;
5025 if (adapter->config_space == NULL)
5028 if (adapter->hw.mac_type >= e1000_82571)
5029 size = PCIE_CONFIG_SPACE_LEN;
5031 size = PCI_CONFIG_SPACE_LEN;
5032 for (i = 0; i < (size / 4); i++)
5033 pci_write_config_dword(dev, i * 4, adapter->config_space[i]);
5034 kfree(adapter->config_space);
5035 adapter->config_space = NULL;
5038 #endif /* CONFIG_PM */
5041 e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5043 struct net_device *netdev = pci_get_drvdata(pdev);
5044 struct e1000_adapter *adapter = netdev_priv(netdev);
5045 uint32_t ctrl, ctrl_ext, rctl, status;
5046 uint32_t wufc = adapter->wol;
5051 netif_device_detach(netdev);
5053 if (netif_running(netdev)) {
5054 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
5055 e1000_down(adapter);
5059 /* Implement our own version of pci_save_state(pdev) because
5060 * PCI Express adapters have 256-byte config spaces. */
5061 retval = e1000_pci_save_state(adapter);
5066 status = E1000_READ_REG(&adapter->hw, STATUS);
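/* Editor's note (descriptive, not from the source): when the link is
 * currently up, the link-status-change bit is dropped from wufc below, so
 * only the remaining wake-up filters are programmed into WUFC. */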
5067 if (status & E1000_STATUS_LU)
5068 wufc &= ~E1000_WUFC_LNKC;
5071 e1000_setup_rctl(adapter);
5072 e1000_set_multi(netdev);
5074 /* turn on all-multi mode if wake on multicast is enabled */
5075 if (wufc & E1000_WUFC_MC) {
5076 rctl = E1000_READ_REG(&adapter->hw, RCTL);
5077 rctl |= E1000_RCTL_MPE;
5078 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
5081 if (adapter->hw.mac_type >= e1000_82540) {
5082 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
5083 /* advertise wake from D3Cold */
5084 #define E1000_CTRL_ADVD3WUC 0x00100000
5085 /* phy power management enable */
5086 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5087 ctrl |= E1000_CTRL_ADVD3WUC |
5088 E1000_CTRL_EN_PHY_PWR_MGMT;
5089 E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
5092 if (adapter->hw.media_type == e1000_media_type_fiber ||
5093 adapter->hw.media_type == e1000_media_type_internal_serdes) {
5094 /* keep the laser running in D3 */
5095 ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
5096 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5097 E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
5100 /* Allow time for pending master requests to run */
5101 e1000_disable_pciex_master(&adapter->hw);
5103 E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
5104 E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
5105 pci_enable_wake(pdev, PCI_D3hot, 1);
5106 pci_enable_wake(pdev, PCI_D3cold, 1);
5108 E1000_WRITE_REG(&adapter->hw, WUC, 0);
5109 E1000_WRITE_REG(&adapter->hw, WUFC, 0);
5110 pci_enable_wake(pdev, PCI_D3hot, 0);
5111 pci_enable_wake(pdev, PCI_D3cold, 0);
5114 e1000_release_manageability(adapter);
5116 /* make sure adapter isn't asleep if manageability is enabled */
5117 if (adapter->en_mng_pt) {
5118 pci_enable_wake(pdev, PCI_D3hot, 1);
5119 pci_enable_wake(pdev, PCI_D3cold, 1);
5122 if (adapter->hw.phy_type == e1000_phy_igp_3)
5123 e1000_phy_powerdown_workaround(&adapter->hw);
5125 if (netif_running(netdev))
5126 e1000_free_irq(adapter);
5128 /* Release control of h/w to f/w. If f/w is AMT enabled, this
5129 * would have already happened in close and is redundant. */
5130 e1000_release_hw_control(adapter);
5132 pci_disable_device(pdev);
5134 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5141 e1000_resume(struct pci_dev *pdev)
5143 struct net_device *netdev = pci_get_drvdata(pdev);
5144 struct e1000_adapter *adapter = netdev_priv(netdev);
5147 pci_set_power_state(pdev, PCI_D0);
5148 e1000_pci_restore_state(adapter);
5149 if ((err = pci_enable_device(pdev))) {
5150 printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
5153 pci_set_master(pdev);
5155 pci_enable_wake(pdev, PCI_D3hot, 0);
5156 pci_enable_wake(pdev, PCI_D3cold, 0);
5158 if (netif_running(netdev) && (err = e1000_request_irq(adapter)))
5161 e1000_power_up_phy(adapter);
5162 e1000_reset(adapter);
5163 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
5165 e1000_init_manageability(adapter);
5167 if (netif_running(netdev))
5170 netif_device_attach(netdev);
5172 /* If the controller is 82573 and f/w is AMT, do not set
5173 * DRV_LOAD until the interface is up. For all other cases,
5174 * let the f/w know that the h/w is now under the control
5175 * of the driver. */
5176 if (adapter->hw.mac_type != e1000_82573 ||
5177 !e1000_check_mng_mode(&adapter->hw))
5178 e1000_get_hw_control(adapter);
5184 static void e1000_shutdown(struct pci_dev *pdev)
5186 e1000_suspend(pdev, PMSG_SUSPEND);
5189 #ifdef CONFIG_NET_POLL_CONTROLLER
5191 * Polling 'interrupt' - used by things like netconsole to send skbs
5192 * without having to re-enable interrupts. It's not called while
5193 * the interrupt routine is executing. */
5196 e1000_netpoll(struct net_device *netdev)
5198 struct e1000_adapter *adapter = netdev_priv(netdev);
5200 disable_irq(adapter->pdev->irq);
5201 e1000_intr(adapter->pdev->irq, netdev);
5202 e1000_clean_tx_irq(adapter, adapter->tx_ring);
5203 #ifndef CONFIG_E1000_NAPI
5204 adapter->clean_rx(adapter, adapter->rx_ring);
5206 enable_irq(adapter->pdev->irq);
5211 * e1000_io_error_detected - called when PCI error is detected
5212 * @pdev: Pointer to PCI device
5213 * @state: The current PCI connection state
5215 * This function is called after a PCI bus error affecting
5216 * this device has been detected. */
5218 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
5220 struct net_device *netdev = pci_get_drvdata(pdev);
5221 struct e1000_adapter *adapter = netdev_priv(netdev);
5223 netif_device_detach(netdev);
5225 if (netif_running(netdev))
5226 e1000_down(adapter);
5227 pci_disable_device(pdev);
5229 /* Request a slot reset. */
5230 return PCI_ERS_RESULT_NEED_RESET;
5234 * e1000_io_slot_reset - called after the pci bus has been reset.
5235 * @pdev: Pointer to PCI device
5237 * Restart the card from scratch, as if from a cold-boot. Implementation
5238 * resembles the first half of the e1000_resume routine. */
5240 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5242 struct net_device *netdev = pci_get_drvdata(pdev);
5243 struct e1000_adapter *adapter = netdev_priv(netdev);
5245 if (pci_enable_device(pdev)) {
5246 printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
5247 return PCI_ERS_RESULT_DISCONNECT;
5249 pci_set_master(pdev);
5251 pci_enable_wake(pdev, PCI_D3hot, 0);
5252 pci_enable_wake(pdev, PCI_D3cold, 0);
5254 e1000_reset(adapter);
5255 E1000_WRITE_REG(&adapter->hw, WUS, ~0);
5257 return PCI_ERS_RESULT_RECOVERED;
5261 * e1000_io_resume - called when traffic can start flowing again.
5262 * @pdev: Pointer to PCI device
5264 * This callback is called when the error recovery driver tells us that
5265 * it's OK to resume normal operation. Implementation resembles the
5266 * second half of the e1000_resume routine. */
5268 static void e1000_io_resume(struct pci_dev *pdev)
5270 struct net_device *netdev = pci_get_drvdata(pdev);
5271 struct e1000_adapter *adapter = netdev_priv(netdev);
5273 e1000_init_manageability(adapter);
5275 if (netif_running(netdev)) {
5276 if (e1000_up(adapter)) {
5277 printk(KERN_ERR "e1000: can't bring device back up after reset\n");
5282 netif_device_attach(netdev);
5284 /* If the controller is 82573 and f/w is AMT, do not set
5285 * DRV_LOAD until the interface is up. For all other cases,
5286 * let the f/w know that the h/w is now under the control
5287 * of the driver. */
5288 if (adapter->hw.mac_type != e1000_82573 ||
5289 !e1000_check_mng_mode(&adapter->hw))
5290 e1000_get_hw_control(adapter);
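/* Editor's note: a minimal sketch of how the three callbacks above are
 * typically wired into the PCI core; the driver's real table is defined
 * elsewhere in the file and may differ. */
static struct pci_error_handlers e1000_example_err_handler = {
	.error_detected = e1000_io_error_detected,
	.slot_reset = e1000_io_slot_reset,
	.resume = e1000_io_resume,
};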