1 /*******************************************************************************
2
3   Intel PRO/1000 Linux driver
4   Copyright(c) 1999 - 2006 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "e1000.h"
30 #include <net/ip6_checksum.h>
31 #include <linux/io.h>
32 #include <linux/prefetch.h>
33 #include <linux/bitops.h>
34 #include <linux/if_vlan.h>
35
36 /* Intel Media SOC GbE MDIO physical base address */
37 static unsigned long ce4100_gbe_mdio_base_phy;
38 /* Intel Media SOC GbE MDIO virtual base address */
39 void __iomem *ce4100_gbe_mdio_base_virt;
40
41 char e1000_driver_name[] = "e1000";
42 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
43 #define DRV_VERSION "7.3.21-k8-NAPI"
44 const char e1000_driver_version[] = DRV_VERSION;
45 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
46
47 /* e1000_pci_tbl - PCI Device ID Table
48  *
49  * Last entry must be all 0s
50  *
51  * Macro expands to...
52  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
53  */
54 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
55         INTEL_E1000_ETHERNET_DEVICE(0x1000),
56         INTEL_E1000_ETHERNET_DEVICE(0x1001),
57         INTEL_E1000_ETHERNET_DEVICE(0x1004),
58         INTEL_E1000_ETHERNET_DEVICE(0x1008),
59         INTEL_E1000_ETHERNET_DEVICE(0x1009),
60         INTEL_E1000_ETHERNET_DEVICE(0x100C),
61         INTEL_E1000_ETHERNET_DEVICE(0x100D),
62         INTEL_E1000_ETHERNET_DEVICE(0x100E),
63         INTEL_E1000_ETHERNET_DEVICE(0x100F),
64         INTEL_E1000_ETHERNET_DEVICE(0x1010),
65         INTEL_E1000_ETHERNET_DEVICE(0x1011),
66         INTEL_E1000_ETHERNET_DEVICE(0x1012),
67         INTEL_E1000_ETHERNET_DEVICE(0x1013),
68         INTEL_E1000_ETHERNET_DEVICE(0x1014),
69         INTEL_E1000_ETHERNET_DEVICE(0x1015),
70         INTEL_E1000_ETHERNET_DEVICE(0x1016),
71         INTEL_E1000_ETHERNET_DEVICE(0x1017),
72         INTEL_E1000_ETHERNET_DEVICE(0x1018),
73         INTEL_E1000_ETHERNET_DEVICE(0x1019),
74         INTEL_E1000_ETHERNET_DEVICE(0x101A),
75         INTEL_E1000_ETHERNET_DEVICE(0x101D),
76         INTEL_E1000_ETHERNET_DEVICE(0x101E),
77         INTEL_E1000_ETHERNET_DEVICE(0x1026),
78         INTEL_E1000_ETHERNET_DEVICE(0x1027),
79         INTEL_E1000_ETHERNET_DEVICE(0x1028),
80         INTEL_E1000_ETHERNET_DEVICE(0x1075),
81         INTEL_E1000_ETHERNET_DEVICE(0x1076),
82         INTEL_E1000_ETHERNET_DEVICE(0x1077),
83         INTEL_E1000_ETHERNET_DEVICE(0x1078),
84         INTEL_E1000_ETHERNET_DEVICE(0x1079),
85         INTEL_E1000_ETHERNET_DEVICE(0x107A),
86         INTEL_E1000_ETHERNET_DEVICE(0x107B),
87         INTEL_E1000_ETHERNET_DEVICE(0x107C),
88         INTEL_E1000_ETHERNET_DEVICE(0x108A),
89         INTEL_E1000_ETHERNET_DEVICE(0x1099),
90         INTEL_E1000_ETHERNET_DEVICE(0x10B5),
91         INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
92         /* required last entry */
93         {0,}
94 };
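/*
 * For reference, each INTEL_E1000_ETHERNET_DEVICE(dev) entry above expands to
 * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, dev)}, which is roughly equivalent to the
 * following sketch (field names as in <linux/pci.h>):
 *   { .vendor = PCI_VENDOR_ID_INTEL, .device = dev,
 *     .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID }
 */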
95
96 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
97
98 int e1000_up(struct e1000_adapter *adapter);
99 void e1000_down(struct e1000_adapter *adapter);
100 void e1000_reinit_locked(struct e1000_adapter *adapter);
101 void e1000_reset(struct e1000_adapter *adapter);
102 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
103 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
104 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
105 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
106 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
107                              struct e1000_tx_ring *txdr);
108 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
109                              struct e1000_rx_ring *rxdr);
110 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
111                              struct e1000_tx_ring *tx_ring);
112 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
113                              struct e1000_rx_ring *rx_ring);
114 void e1000_update_stats(struct e1000_adapter *adapter);
115
116 static int e1000_init_module(void);
117 static void e1000_exit_module(void);
118 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
119 static void __devexit e1000_remove(struct pci_dev *pdev);
120 static int e1000_alloc_queues(struct e1000_adapter *adapter);
121 static int e1000_sw_init(struct e1000_adapter *adapter);
122 static int e1000_open(struct net_device *netdev);
123 static int e1000_close(struct net_device *netdev);
124 static void e1000_configure_tx(struct e1000_adapter *adapter);
125 static void e1000_configure_rx(struct e1000_adapter *adapter);
126 static void e1000_setup_rctl(struct e1000_adapter *adapter);
127 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
128 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
129 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
130                                 struct e1000_tx_ring *tx_ring);
131 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
132                                 struct e1000_rx_ring *rx_ring);
133 static void e1000_set_rx_mode(struct net_device *netdev);
134 static void e1000_update_phy_info(unsigned long data);
135 static void e1000_update_phy_info_task(struct work_struct *work);
136 static void e1000_watchdog(unsigned long data);
137 static void e1000_82547_tx_fifo_stall(unsigned long data);
138 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
139 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
140                                     struct net_device *netdev);
141 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
142 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
143 static int e1000_set_mac(struct net_device *netdev, void *p);
144 static irqreturn_t e1000_intr(int irq, void *data);
145 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
146                                struct e1000_tx_ring *tx_ring);
147 static int e1000_clean(struct napi_struct *napi, int budget);
148 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
149                                struct e1000_rx_ring *rx_ring,
150                                int *work_done, int work_to_do);
151 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
152                                      struct e1000_rx_ring *rx_ring,
153                                      int *work_done, int work_to_do);
154 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
155                                    struct e1000_rx_ring *rx_ring,
156                                    int cleaned_count);
157 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
158                                          struct e1000_rx_ring *rx_ring,
159                                          int cleaned_count);
160 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
161 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
162                            int cmd);
163 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
164 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
165 static void e1000_tx_timeout(struct net_device *dev);
166 static void e1000_reset_task(struct work_struct *work);
167 static void e1000_smartspeed(struct e1000_adapter *adapter);
168 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
169                                        struct sk_buff *skb);
170
171 static bool e1000_vlan_used(struct e1000_adapter *adapter);
172 static void e1000_vlan_mode(struct net_device *netdev, u32 features);
173 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
174 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
175 static void e1000_restore_vlan(struct e1000_adapter *adapter);
176
177 #ifdef CONFIG_PM
178 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
179 static int e1000_resume(struct pci_dev *pdev);
180 #endif
181 static void e1000_shutdown(struct pci_dev *pdev);
182
183 #ifdef CONFIG_NET_POLL_CONTROLLER
184 /* for netdump / net console */
185 static void e1000_netpoll (struct net_device *netdev);
186 #endif
187
188 #define COPYBREAK_DEFAULT 256
189 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
190 module_param(copybreak, uint, 0644);
191 MODULE_PARM_DESC(copybreak,
192         "Maximum size of packet that is copied to a new buffer on receive");
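/*
 * Per the parameter description above and the init-time message below:
 * received packets no longer than copybreak bytes are copied into a new,
 * right-sized skb so the driver can keep reusing the original full-size Rx
 * buffer; a value of 0 disables the copy entirely.
 */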
193
194 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
195                      pci_channel_state_t state);
196 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
197 static void e1000_io_resume(struct pci_dev *pdev);
198
199 static struct pci_error_handlers e1000_err_handler = {
200         .error_detected = e1000_io_error_detected,
201         .slot_reset = e1000_io_slot_reset,
202         .resume = e1000_io_resume,
203 };
204
205 static struct pci_driver e1000_driver = {
206         .name     = e1000_driver_name,
207         .id_table = e1000_pci_tbl,
208         .probe    = e1000_probe,
209         .remove   = __devexit_p(e1000_remove),
210 #ifdef CONFIG_PM
211         /* Power Management Hooks */
212         .suspend  = e1000_suspend,
213         .resume   = e1000_resume,
214 #endif
215         .shutdown = e1000_shutdown,
216         .err_handler = &e1000_err_handler
217 };
218
219 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
220 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
221 MODULE_LICENSE("GPL");
222 MODULE_VERSION(DRV_VERSION);
223
224 static int debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
225 module_param(debug, int, 0);
226 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
227
228 /**
229  * e1000_get_hw_dev - return device
230  * used by hardware layer to print debugging information
231  *
232  **/
233 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
234 {
235         struct e1000_adapter *adapter = hw->back;
236         return adapter->netdev;
237 }
238
239 /**
240  * e1000_init_module - Driver Registration Routine
241  *
242  * e1000_init_module is the first routine called when the driver is
243  * loaded. All it does is register with the PCI subsystem.
244  **/
245
246 static int __init e1000_init_module(void)
247 {
248         int ret;
249         pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
250
251         pr_info("%s\n", e1000_copyright);
252
253         ret = pci_register_driver(&e1000_driver);
254         if (copybreak != COPYBREAK_DEFAULT) {
255                 if (copybreak == 0)
256                         pr_info("copybreak disabled\n");
257                 else
258                         pr_info("copybreak enabled for "
259                                    "packets <= %u bytes\n", copybreak);
260         }
261         return ret;
262 }
263
264 module_init(e1000_init_module);
265
266 /**
267  * e1000_exit_module - Driver Exit Cleanup Routine
268  *
269  * e1000_exit_module is called just before the driver is removed
270  * from memory.
271  **/
272
273 static void __exit e1000_exit_module(void)
274 {
275         pci_unregister_driver(&e1000_driver);
276 }
277
278 module_exit(e1000_exit_module);
279
280 static int e1000_request_irq(struct e1000_adapter *adapter)
281 {
282         struct net_device *netdev = adapter->netdev;
283         irq_handler_t handler = e1000_intr;
284         int irq_flags = IRQF_SHARED;
285         int err;
286
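        /* The legacy INTx line may be shared with other devices, hence
         * IRQF_SHARED; netdev is passed as the cookie that both e1000_intr()
         * and free_irq() use to identify this device's registration. */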
287         err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
288                           netdev);
289         if (err) {
290                 e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
291         }
292
293         return err;
294 }
295
296 static void e1000_free_irq(struct e1000_adapter *adapter)
297 {
298         struct net_device *netdev = adapter->netdev;
299
300         free_irq(adapter->pdev->irq, netdev);
301 }
302
303 /**
304  * e1000_irq_disable - Mask off interrupt generation on the NIC
305  * @adapter: board private structure
306  **/
307
308 static void e1000_irq_disable(struct e1000_adapter *adapter)
309 {
310         struct e1000_hw *hw = &adapter->hw;
311
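        /* Writing all ones to IMC (Interrupt Mask Clear) masks every interrupt
         * cause; the register flush plus synchronize_irq() below ensure no
         * handler is still running when this returns. */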
312         ew32(IMC, ~0);
313         E1000_WRITE_FLUSH();
314         synchronize_irq(adapter->pdev->irq);
315 }
316
317 /**
318  * e1000_irq_enable - Enable default interrupt generation settings
319  * @adapter: board private structure
320  **/
321
322 static void e1000_irq_enable(struct e1000_adapter *adapter)
323 {
324         struct e1000_hw *hw = &adapter->hw;
325
326         ew32(IMS, IMS_ENABLE_MASK);
327         E1000_WRITE_FLUSH();
328 }
329
330 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
331 {
332         struct e1000_hw *hw = &adapter->hw;
333         struct net_device *netdev = adapter->netdev;
334         u16 vid = hw->mng_cookie.vlan_id;
335         u16 old_vid = adapter->mng_vlan_id;
336
337         if (!e1000_vlan_used(adapter))
338                 return;
339
340         if (!test_bit(vid, adapter->active_vlans)) {
341                 if (hw->mng_cookie.status &
342                     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
343                         e1000_vlan_rx_add_vid(netdev, vid);
344                         adapter->mng_vlan_id = vid;
345                 } else {
346                         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
347                 }
348                 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
349                     (vid != old_vid) &&
350                     !test_bit(old_vid, adapter->active_vlans))
351                         e1000_vlan_rx_kill_vid(netdev, old_vid);
352         } else {
353                 adapter->mng_vlan_id = vid;
354         }
355 }
356
357 static void e1000_init_manageability(struct e1000_adapter *adapter)
358 {
359         struct e1000_hw *hw = &adapter->hw;
360
361         if (adapter->en_mng_pt) {
362                 u32 manc = er32(MANC);
363
364                 /* disable hardware interception of ARP */
365                 manc &= ~(E1000_MANC_ARP_EN);
366
367                 ew32(MANC, manc);
368         }
369 }
370
371 static void e1000_release_manageability(struct e1000_adapter *adapter)
372 {
373         struct e1000_hw *hw = &adapter->hw;
374
375         if (adapter->en_mng_pt) {
376                 u32 manc = er32(MANC);
377
378                 /* re-enable hardware interception of ARP */
379                 manc |= E1000_MANC_ARP_EN;
380
381                 ew32(MANC, manc);
382         }
383 }
384
385 /**
386  * e1000_configure - configure the hardware for RX and TX
387  * @adapter: board private structure
388  **/
389 static void e1000_configure(struct e1000_adapter *adapter)
390 {
391         struct net_device *netdev = adapter->netdev;
392         int i;
393
394         e1000_set_rx_mode(netdev);
395
396         e1000_restore_vlan(adapter);
397         e1000_init_manageability(adapter);
398
399         e1000_configure_tx(adapter);
400         e1000_setup_rctl(adapter);
401         e1000_configure_rx(adapter);
402         /* call E1000_DESC_UNUSED which always leaves
403          * at least 1 descriptor unused to make sure
404          * next_to_use != next_to_clean */
405         for (i = 0; i < adapter->num_rx_queues; i++) {
406                 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
407                 adapter->alloc_rx_buf(adapter, ring,
408                                       E1000_DESC_UNUSED(ring));
409         }
410 }
411
412 int e1000_up(struct e1000_adapter *adapter)
413 {
414         struct e1000_hw *hw = &adapter->hw;
415
416         /* hardware has been reset, we need to reload some things */
417         e1000_configure(adapter);
418
419         clear_bit(__E1000_DOWN, &adapter->flags);
420
421         napi_enable(&adapter->napi);
422
423         e1000_irq_enable(adapter);
424
425         netif_wake_queue(adapter->netdev);
426
427         /* fire a link change interrupt to start the watchdog */
428         ew32(ICS, E1000_ICS_LSC);
429         return 0;
430 }
431
432 /**
433  * e1000_power_up_phy - restore link in case the phy was powered down
434  * @adapter: address of board private structure
435  *
436  * The phy may be powered down to save power and turn off link when the
437  * driver is unloaded and wake on lan is not enabled (among others)
438  * *** this routine MUST be followed by a call to e1000_reset ***
439  *
440  **/
441
442 void e1000_power_up_phy(struct e1000_adapter *adapter)
443 {
444         struct e1000_hw *hw = &adapter->hw;
445         u16 mii_reg = 0;
446
447         /* Just clear the power down bit to wake the phy back up */
448         if (hw->media_type == e1000_media_type_copper) {
449                 /* according to the manual, the phy will retain its
450                  * settings across a power-down/up cycle */
451                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
452                 mii_reg &= ~MII_CR_POWER_DOWN;
453                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
454         }
455 }
456
457 static void e1000_power_down_phy(struct e1000_adapter *adapter)
458 {
459         struct e1000_hw *hw = &adapter->hw;
460
461         /* Power down the PHY so no link is implied when interface is down.
462          * The PHY cannot be powered down if any of the following is true:
463          * (a) WoL is enabled
464          * (b) AMT is active
465          * (c) SoL/IDER session is active */
466         if (!adapter->wol && hw->mac_type >= e1000_82540 &&
467            hw->media_type == e1000_media_type_copper) {
468                 u16 mii_reg = 0;
469
470                 switch (hw->mac_type) {
471                 case e1000_82540:
472                 case e1000_82545:
473                 case e1000_82545_rev_3:
474                 case e1000_82546:
475                 case e1000_ce4100:
476                 case e1000_82546_rev_3:
477                 case e1000_82541:
478                 case e1000_82541_rev_2:
479                 case e1000_82547:
480                 case e1000_82547_rev_2:
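                        /* Assumption: if the SMBUS_EN manageability bit checked
                         * below is set, firmware may still be using the PHY over
                         * SMBus, so leave it powered up. */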
481                         if (er32(MANC) & E1000_MANC_SMBUS_EN)
482                                 goto out;
483                         break;
484                 default:
485                         goto out;
486                 }
487                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
488                 mii_reg |= MII_CR_POWER_DOWN;
489                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
490                 mdelay(1);
491         }
492 out:
493         return;
494 }
495
496 void e1000_down(struct e1000_adapter *adapter)
497 {
498         struct e1000_hw *hw = &adapter->hw;
499         struct net_device *netdev = adapter->netdev;
500         u32 rctl, tctl;
501
502
503         /* disable receives in the hardware */
504         rctl = er32(RCTL);
505         ew32(RCTL, rctl & ~E1000_RCTL_EN);
506         /* flush and sleep below */
507
508         netif_tx_disable(netdev);
509
510         /* disable transmits in the hardware */
511         tctl = er32(TCTL);
512         tctl &= ~E1000_TCTL_EN;
513         ew32(TCTL, tctl);
514         /* flush both disables and wait for them to finish */
515         E1000_WRITE_FLUSH();
516         msleep(10);
517
518         napi_disable(&adapter->napi);
519
520         e1000_irq_disable(adapter);
521
522         /*
523          * Setting DOWN must be after irq_disable to prevent
524          * a screaming interrupt.  Setting DOWN also prevents
525          * timers and tasks from rescheduling.
526          */
527         set_bit(__E1000_DOWN, &adapter->flags);
528
529         del_timer_sync(&adapter->tx_fifo_stall_timer);
530         del_timer_sync(&adapter->watchdog_timer);
531         del_timer_sync(&adapter->phy_info_timer);
532
533         adapter->link_speed = 0;
534         adapter->link_duplex = 0;
535         netif_carrier_off(netdev);
536
537         e1000_reset(adapter);
538         e1000_clean_all_tx_rings(adapter);
539         e1000_clean_all_rx_rings(adapter);
540 }
541
542 static void e1000_reinit_safe(struct e1000_adapter *adapter)
543 {
544         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
545                 msleep(1);
546         rtnl_lock();
547         e1000_down(adapter);
548         e1000_up(adapter);
549         rtnl_unlock();
550         clear_bit(__E1000_RESETTING, &adapter->flags);
551 }
552
553 void e1000_reinit_locked(struct e1000_adapter *adapter)
554 {
555         /* if rtnl_lock is not held the call path is bogus */
556         ASSERT_RTNL();
557         WARN_ON(in_interrupt());
558         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
559                 msleep(1);
560         e1000_down(adapter);
561         e1000_up(adapter);
562         clear_bit(__E1000_RESETTING, &adapter->flags);
563 }
564
565 void e1000_reset(struct e1000_adapter *adapter)
566 {
567         struct e1000_hw *hw = &adapter->hw;
568         u32 pba = 0, tx_space, min_tx_space, min_rx_space;
569         bool legacy_pba_adjust = false;
570         u16 hwm;
571
572         /* Repartition Pba for greater than 9k mtu
573          * To take effect CTRL.RST is required.
574          */
575
576         switch (hw->mac_type) {
577         case e1000_82542_rev2_0:
578         case e1000_82542_rev2_1:
579         case e1000_82543:
580         case e1000_82544:
581         case e1000_82540:
582         case e1000_82541:
583         case e1000_82541_rev_2:
584                 legacy_pba_adjust = true;
585                 pba = E1000_PBA_48K;
586                 break;
587         case e1000_82545:
588         case e1000_82545_rev_3:
589         case e1000_82546:
590         case e1000_ce4100:
591         case e1000_82546_rev_3:
592                 pba = E1000_PBA_48K;
593                 break;
594         case e1000_82547:
595         case e1000_82547_rev_2:
596                 legacy_pba_adjust = true;
597                 pba = E1000_PBA_30K;
598                 break;
599         case e1000_undefined:
600         case e1000_num_macs:
601                 break;
602         }
603
604         if (legacy_pba_adjust) {
605                 if (hw->max_frame_size > E1000_RXBUFFER_8192)
606                         pba -= 8; /* allocate more FIFO for Tx */
607
608                 if (hw->mac_type == e1000_82547) {
609                         adapter->tx_fifo_head = 0;
610                         adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
611                         adapter->tx_fifo_size =
612                                 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
613                         atomic_set(&adapter->tx_fifo_stall, 0);
614                 }
615         } else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
616                 /* adjust PBA for jumbo frames */
617                 ew32(PBA, pba);
618
619                 /* To maintain wire speed transmits, the Tx FIFO should be
620                  * large enough to accommodate two full transmit packets,
621                  * rounded up to the next 1KB and expressed in KB.  Likewise,
622                  * the Rx FIFO should be large enough to accommodate at least
623                  * one full receive packet and is similarly rounded up and
624                  * expressed in KB. */
625                 pba = er32(PBA);
626                 /* upper 16 bits has Tx packet buffer allocation size in KB */
627                 tx_space = pba >> 16;
628                 /* lower 16 bits has Rx packet buffer allocation size in KB */
629                 pba &= 0xffff;
630                 /*
631                  * the Tx FIFO also stores 16 bytes of information about the Tx
632                  * packet, but doesn't include the Ethernet FCS since hardware appends it
633                  */
634                 min_tx_space = (hw->max_frame_size +
635                                 sizeof(struct e1000_tx_desc) -
636                                 ETH_FCS_LEN) * 2;
637                 min_tx_space = ALIGN(min_tx_space, 1024);
638                 min_tx_space >>= 10;
639                 /* software strips receive CRC, so leave room for it */
640                 min_rx_space = hw->max_frame_size;
641                 min_rx_space = ALIGN(min_rx_space, 1024);
642                 min_rx_space >>= 10;
643
644                 /* If current Tx allocation is less than the min Tx FIFO size,
645                  * and the min Tx FIFO size is less than the current Rx FIFO
646                  * allocation, take space away from current Rx allocation */
647                 if (tx_space < min_tx_space &&
648                     ((min_tx_space - tx_space) < pba)) {
649                         pba = pba - (min_tx_space - tx_space);
650
651                         /* PCI/PCIx hardware has PBA alignment constraints */
652                         switch (hw->mac_type) {
653                         case e1000_82545 ... e1000_82546_rev_3:
654                                 pba &= ~(E1000_PBA_8K - 1);
655                                 break;
656                         default:
657                                 break;
658                         }
659
660                         /* if short on Rx space, Rx wins and must trump Tx
661                          * adjustment */
662                         if (pba < min_rx_space)
663                                 pba = min_rx_space;
664                 }
665         }
666
667         ew32(PBA, pba);
668
669         /*
670          * flow control settings:
671          * The high water mark must be low enough to fit one full frame
672          * (or the size used for early receive) above it in the Rx FIFO.
673          * Set it to the lower of:
674          * - 90% of the Rx FIFO size, and
675          * - the full Rx FIFO size minus the early receive size (for parts
676          *   with ERT support assuming ERT set to E1000_ERT_2048), or
677          * - the full Rx FIFO size minus one full frame
678          */
679         hwm = min(((pba << 10) * 9 / 10),
680                   ((pba << 10) - hw->max_frame_size));
681
682         hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
683         hw->fc_low_water = hw->fc_high_water - 8;
684         hw->fc_pause_time = E1000_FC_PAUSE_TIME;
685         hw->fc_send_xon = 1;
686         hw->fc = hw->original_fc;
687
688         /* Allow time for pending master requests to run */
689         e1000_reset_hw(hw);
690         if (hw->mac_type >= e1000_82544)
691                 ew32(WUC, 0);
692
693         if (e1000_init_hw(hw))
694                 e_dev_err("Hardware Error\n");
695         e1000_update_mng_vlan(adapter);
696
697         /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
698         if (hw->mac_type >= e1000_82544 &&
699             hw->autoneg == 1 &&
700             hw->autoneg_advertised == ADVERTISE_1000_FULL) {
701                 u32 ctrl = er32(CTRL);
702                 /* clear phy power management bit if we are in gig only mode,
703                  * which if enabled will attempt negotiation to 100Mb, which
704                  * can cause a loss of link at power off or driver unload */
705                 ctrl &= ~E1000_CTRL_SWDPIN3;
706                 ew32(CTRL, ctrl);
707         }
708
709         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
710         ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
711
712         e1000_reset_adaptive(hw);
713         e1000_phy_get_info(hw, &adapter->phy_info);
714
715         e1000_release_manageability(adapter);
716 }
717
718 /**
719  * e1000_dump_eeprom - dump the EEPROM for users having checksum issues
720  **/
721 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
722 {
723         struct net_device *netdev = adapter->netdev;
724         struct ethtool_eeprom eeprom;
725         const struct ethtool_ops *ops = netdev->ethtool_ops;
726         u8 *data;
727         int i;
728         u16 csum_old, csum_new = 0;
729
730         eeprom.len = ops->get_eeprom_len(netdev);
731         eeprom.offset = 0;
732
733         data = kmalloc(eeprom.len, GFP_KERNEL);
734         if (!data) {
735                 pr_err("Unable to allocate memory to dump EEPROM data\n");
736                 return;
737         }
738
739         ops->get_eeprom(netdev, &eeprom, data);
740
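        /* The image is valid when the 16-bit words from offset 0 through
         * EEPROM_CHECKSUM_REG (inclusive) sum to EEPROM_SUM; csum_new below is
         * the checksum value that would make the stored words add up. */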
741         csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
742                    (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
743         for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
744                 csum_new += data[i] + (data[i + 1] << 8);
745         csum_new = EEPROM_SUM - csum_new;
746
747         pr_err("/*********************/\n");
748         pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
749         pr_err("Calculated              : 0x%04x\n", csum_new);
750
751         pr_err("Offset    Values\n");
752         pr_err("========  ======\n");
753         print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
754
755         pr_err("Include this output when contacting your support provider.\n");
756         pr_err("This is not a software error! Something bad happened to\n");
757         pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
758         pr_err("result in further problems, possibly loss of data,\n");
759         pr_err("corruption or system hangs!\n");
760         pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
761         pr_err("which is invalid and requires you to set the proper MAC\n");
762         pr_err("address manually before continuing to enable this network\n");
763         pr_err("device. Please inspect the EEPROM dump and report the\n");
764         pr_err("issue to your hardware vendor or Intel Customer Support.\n");
765         pr_err("/*********************/\n");
766
767         kfree(data);
768 }
769
770 /**
771  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
772  * @pdev: PCI device information struct
773  *
774  * Return true if an adapter needs ioport resources
775  **/
776 static int e1000_is_need_ioport(struct pci_dev *pdev)
777 {
778         switch (pdev->device) {
779         case E1000_DEV_ID_82540EM:
780         case E1000_DEV_ID_82540EM_LOM:
781         case E1000_DEV_ID_82540EP:
782         case E1000_DEV_ID_82540EP_LOM:
783         case E1000_DEV_ID_82540EP_LP:
784         case E1000_DEV_ID_82541EI:
785         case E1000_DEV_ID_82541EI_MOBILE:
786         case E1000_DEV_ID_82541ER:
787         case E1000_DEV_ID_82541ER_LOM:
788         case E1000_DEV_ID_82541GI:
789         case E1000_DEV_ID_82541GI_LF:
790         case E1000_DEV_ID_82541GI_MOBILE:
791         case E1000_DEV_ID_82544EI_COPPER:
792         case E1000_DEV_ID_82544EI_FIBER:
793         case E1000_DEV_ID_82544GC_COPPER:
794         case E1000_DEV_ID_82544GC_LOM:
795         case E1000_DEV_ID_82545EM_COPPER:
796         case E1000_DEV_ID_82545EM_FIBER:
797         case E1000_DEV_ID_82546EB_COPPER:
798         case E1000_DEV_ID_82546EB_FIBER:
799         case E1000_DEV_ID_82546EB_QUAD_COPPER:
800                 return true;
801         default:
802                 return false;
803         }
804 }
805
806 static u32 e1000_fix_features(struct net_device *netdev, u32 features)
807 {
808         /*
809          * Since there is no support for separate rx/tx vlan accel
810          * enable/disable make sure tx flag is always in same state as rx.
811          */
812         if (features & NETIF_F_HW_VLAN_RX)
813                 features |= NETIF_F_HW_VLAN_TX;
814         else
815                 features &= ~NETIF_F_HW_VLAN_TX;
816
817         return features;
818 }
819
820 static int e1000_set_features(struct net_device *netdev, u32 features)
821 {
822         struct e1000_adapter *adapter = netdev_priv(netdev);
823         u32 changed = features ^ netdev->features;
824
825         if (changed & NETIF_F_HW_VLAN_RX)
826                 e1000_vlan_mode(netdev, features);
827
828         if (!(changed & NETIF_F_RXCSUM))
829                 return 0;
830
831         adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
832
833         if (netif_running(netdev))
834                 e1000_reinit_locked(adapter);
835         else
836                 e1000_reset(adapter);
837
838         return 0;
839 }
840
841 static const struct net_device_ops e1000_netdev_ops = {
842         .ndo_open               = e1000_open,
843         .ndo_stop               = e1000_close,
844         .ndo_start_xmit         = e1000_xmit_frame,
845         .ndo_get_stats          = e1000_get_stats,
846         .ndo_set_rx_mode        = e1000_set_rx_mode,
847         .ndo_set_mac_address    = e1000_set_mac,
848         .ndo_tx_timeout         = e1000_tx_timeout,
849         .ndo_change_mtu         = e1000_change_mtu,
850         .ndo_do_ioctl           = e1000_ioctl,
851         .ndo_validate_addr      = eth_validate_addr,
852         .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
853         .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
854 #ifdef CONFIG_NET_POLL_CONTROLLER
855         .ndo_poll_controller    = e1000_netpoll,
856 #endif
857         .ndo_fix_features       = e1000_fix_features,
858         .ndo_set_features       = e1000_set_features,
859 };
860
861 /**
862  * e1000_init_hw_struct - initialize members of hw struct
863  * @adapter: board private struct
864  * @hw: structure used by e1000_hw.c
865  *
866  * Factors out initialization of the e1000_hw struct to its own function
867  * that can be called very early at init (just after struct allocation).
868  * Fields are initialized based on PCI device information and
869  * OS network device settings (MTU size).
870  * Returns negative error codes if MAC type setup fails.
871  */
872 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
873                                 struct e1000_hw *hw)
874 {
875         struct pci_dev *pdev = adapter->pdev;
876
877         /* PCI config space info */
878         hw->vendor_id = pdev->vendor;
879         hw->device_id = pdev->device;
880         hw->subsystem_vendor_id = pdev->subsystem_vendor;
881         hw->subsystem_id = pdev->subsystem_device;
882         hw->revision_id = pdev->revision;
883
884         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
885
886         hw->max_frame_size = adapter->netdev->mtu +
887                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
888         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
889
890         /* identify the MAC */
891         if (e1000_set_mac_type(hw)) {
892                 e_err(probe, "Unknown MAC Type\n");
893                 return -EIO;
894         }
895
896         switch (hw->mac_type) {
897         default:
898                 break;
899         case e1000_82541:
900         case e1000_82547:
901         case e1000_82541_rev_2:
902         case e1000_82547_rev_2:
903                 hw->phy_init_script = 1;
904                 break;
905         }
906
907         e1000_set_media_type(hw);
908         e1000_get_bus_info(hw);
909
910         hw->wait_autoneg_complete = false;
911         hw->tbi_compatibility_en = true;
912         hw->adaptive_ifs = true;
913
914         /* Copper options */
915
916         if (hw->media_type == e1000_media_type_copper) {
917                 hw->mdix = AUTO_ALL_MODES;
918                 hw->disable_polarity_correction = false;
919                 hw->master_slave = E1000_MASTER_SLAVE;
920         }
921
922         return 0;
923 }
924
925 /**
926  * e1000_probe - Device Initialization Routine
927  * @pdev: PCI device information struct
928  * @ent: entry in e1000_pci_tbl
929  *
930  * Returns 0 on success, negative on failure
931  *
932  * e1000_probe initializes an adapter identified by a pci_dev structure.
933  * The OS initialization, configuring of the adapter private structure,
934  * and a hardware reset occur.
935  **/
936 static int __devinit e1000_probe(struct pci_dev *pdev,
937                                  const struct pci_device_id *ent)
938 {
939         struct net_device *netdev;
940         struct e1000_adapter *adapter;
941         struct e1000_hw *hw;
942
943         static int cards_found = 0;
944         static int global_quad_port_a = 0; /* global ksp3 port a indication */
945         int i, err, pci_using_dac;
946         u16 eeprom_data = 0;
947         u16 tmp = 0;
948         u16 eeprom_apme_mask = E1000_EEPROM_APME;
949         int bars, need_ioport;
950
951         /* do not allocate ioport bars when not needed */
952         need_ioport = e1000_is_need_ioport(pdev);
953         if (need_ioport) {
954                 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
955                 err = pci_enable_device(pdev);
956         } else {
957                 bars = pci_select_bars(pdev, IORESOURCE_MEM);
958                 err = pci_enable_device_mem(pdev);
959         }
960         if (err)
961                 return err;
962
963         err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
964         if (err)
965                 goto err_pci_reg;
966
967         pci_set_master(pdev);
968         err = pci_save_state(pdev);
969         if (err)
970                 goto err_alloc_etherdev;
971
972         err = -ENOMEM;
973         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
974         if (!netdev)
975                 goto err_alloc_etherdev;
976
977         SET_NETDEV_DEV(netdev, &pdev->dev);
978
979         pci_set_drvdata(pdev, netdev);
980         adapter = netdev_priv(netdev);
981         adapter->netdev = netdev;
982         adapter->pdev = pdev;
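        /* convert the debug module parameter (a message level) into a bitmap
         * of NETIF_MSG_* flags: level n enables the n lowest message types */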
983         adapter->msg_enable = (1 << debug) - 1;
984         adapter->bars = bars;
985         adapter->need_ioport = need_ioport;
986
987         hw = &adapter->hw;
988         hw->back = adapter;
989
990         err = -EIO;
991         hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
992         if (!hw->hw_addr)
993                 goto err_ioremap;
994
995         if (adapter->need_ioport) {
996                 for (i = BAR_1; i <= BAR_5; i++) {
997                         if (pci_resource_len(pdev, i) == 0)
998                                 continue;
999                         if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1000                                 hw->io_base = pci_resource_start(pdev, i);
1001                                 break;
1002                         }
1003                 }
1004         }
1005
1006         /* make ready for any if (hw->...) below */
1007         err = e1000_init_hw_struct(adapter, hw);
1008         if (err)
1009                 goto err_sw_init;
1010
1011         /*
1012          * there is a workaround being applied below that limits
1013          * 64-bit DMA addresses to 64-bit hardware.  There are some
1014          * 32-bit adapters that hang on Tx when given 64-bit DMA addresses
1015          */
1016         pci_using_dac = 0;
1017         if ((hw->bus_type == e1000_bus_type_pcix) &&
1018             !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1019                 /*
1020                  * according to DMA-API-HOWTO, coherent calls will always
1021                  * succeed if the set call did
1022                  */
1023                 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1024                 pci_using_dac = 1;
1025         } else {
1026                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1027                 if (err) {
1028                         pr_err("No usable DMA config, aborting\n");
1029                         goto err_dma;
1030                 }
1031                 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1032         }
1033
1034         netdev->netdev_ops = &e1000_netdev_ops;
1035         e1000_set_ethtool_ops(netdev);
1036         netdev->watchdog_timeo = 5 * HZ;
1037         netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1038
1039         strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1040
1041         adapter->bd_number = cards_found;
1042
1043         /* setup the private structure */
1044
1045         err = e1000_sw_init(adapter);
1046         if (err)
1047                 goto err_sw_init;
1048
1049         err = -EIO;
1050         if (hw->mac_type == e1000_ce4100) {
1051                 ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
1052                 ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
1053                                                 pci_resource_len(pdev, BAR_1));
1054
1055                 if (!ce4100_gbe_mdio_base_virt)
1056                         goto err_mdio_ioremap;
1057         }
1058
1059         if (hw->mac_type >= e1000_82543) {
1060                 netdev->hw_features = NETIF_F_SG |
1061                                    NETIF_F_HW_CSUM |
1062                                    NETIF_F_HW_VLAN_RX;
1063                 netdev->features = NETIF_F_HW_VLAN_TX |
1064                                    NETIF_F_HW_VLAN_FILTER;
1065         }
1066
1067         if ((hw->mac_type >= e1000_82544) &&
1068            (hw->mac_type != e1000_82547))
1069                 netdev->hw_features |= NETIF_F_TSO;
1070
1071         netdev->features |= netdev->hw_features;
1072         netdev->hw_features |= NETIF_F_RXCSUM;
1073
1074         if (pci_using_dac) {
1075                 netdev->features |= NETIF_F_HIGHDMA;
1076                 netdev->vlan_features |= NETIF_F_HIGHDMA;
1077         }
1078
1079         netdev->vlan_features |= NETIF_F_TSO;
1080         netdev->vlan_features |= NETIF_F_HW_CSUM;
1081         netdev->vlan_features |= NETIF_F_SG;
1082
1083         netdev->priv_flags |= IFF_UNICAST_FLT;
1084
1085         adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1086
1087         /* initialize eeprom parameters */
1088         if (e1000_init_eeprom_params(hw)) {
1089                 e_err(probe, "EEPROM initialization failed\n");
1090                 goto err_eeprom;
1091         }
1092
1093         /* before reading the EEPROM, reset the controller to
1094          * put the device in a known good starting state */
1095
1096         e1000_reset_hw(hw);
1097
1098         /* make sure the EEPROM is good */
1099         if (e1000_validate_eeprom_checksum(hw) < 0) {
1100                 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1101                 e1000_dump_eeprom(adapter);
1102                 /*
1103                  * set MAC address to all zeroes to invalidate and temporarily
1104                  * disable this device for the user. This blocks regular
1105                  * traffic while still permitting ethtool ioctls from reaching
1106                  * the hardware as well as allowing the user to run the
1107                  * interface after manually setting a hw addr using
1108                  * `ip link set address`
1109                  */
1110                 memset(hw->mac_addr, 0, netdev->addr_len);
1111         } else {
1112                 /* copy the MAC address out of the EEPROM */
1113                 if (e1000_read_mac_addr(hw))
1114                         e_err(probe, "EEPROM Read Error\n");
1115         }
1116         /* don't block initialization here due to bad MAC address */
1117         memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1118         memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
1119
1120         if (!is_valid_ether_addr(netdev->perm_addr))
1121                 e_err(probe, "Invalid MAC Address\n");
1122
1123         init_timer(&adapter->tx_fifo_stall_timer);
1124         adapter->tx_fifo_stall_timer.function = e1000_82547_tx_fifo_stall;
1125         adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
1126
1127         init_timer(&adapter->watchdog_timer);
1128         adapter->watchdog_timer.function = e1000_watchdog;
1129         adapter->watchdog_timer.data = (unsigned long) adapter;
1130
1131         init_timer(&adapter->phy_info_timer);
1132         adapter->phy_info_timer.function = e1000_update_phy_info;
1133         adapter->phy_info_timer.data = (unsigned long)adapter;
1134
1135         INIT_WORK(&adapter->fifo_stall_task, e1000_82547_tx_fifo_stall_task);
1136         INIT_WORK(&adapter->reset_task, e1000_reset_task);
1137         INIT_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1138
1139         e1000_check_options(adapter);
1140
1141         /* Initial Wake on LAN setting
1142          * If APM wake is enabled in the EEPROM,
1143          * enable the ACPI Magic Packet filter
1144          */
1145
1146         switch (hw->mac_type) {
1147         case e1000_82542_rev2_0:
1148         case e1000_82542_rev2_1:
1149         case e1000_82543:
1150                 break;
1151         case e1000_82544:
1152                 e1000_read_eeprom(hw,
1153                         EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1154                 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1155                 break;
1156         case e1000_82546:
1157         case e1000_82546_rev_3:
1158                 if (er32(STATUS) & E1000_STATUS_FUNC_1) {
1159                         e1000_read_eeprom(hw,
1160                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1161                         break;
1162                 }
1163                 /* Fall Through */
1164         default:
1165                 e1000_read_eeprom(hw,
1166                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1167                 break;
1168         }
1169         if (eeprom_data & eeprom_apme_mask)
1170                 adapter->eeprom_wol |= E1000_WUFC_MAG;
1171
1172         /* now that we have the eeprom settings, apply the special cases
1173          * where the eeprom may be wrong or the board simply won't support
1174          * wake on lan on a particular port */
1175         switch (pdev->device) {
1176         case E1000_DEV_ID_82546GB_PCIE:
1177                 adapter->eeprom_wol = 0;
1178                 break;
1179         case E1000_DEV_ID_82546EB_FIBER:
1180         case E1000_DEV_ID_82546GB_FIBER:
1181                 /* Wake events only supported on port A for dual fiber
1182                  * regardless of eeprom setting */
1183                 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1184                         adapter->eeprom_wol = 0;
1185                 break;
1186         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1187                 /* if quad port adapter, disable WoL on all but port A */
1188                 if (global_quad_port_a != 0)
1189                         adapter->eeprom_wol = 0;
1190                 else
1191                         adapter->quad_port_a = 1;
1192                 /* Reset for multiple quad port adapters */
1193                 if (++global_quad_port_a == 4)
1194                         global_quad_port_a = 0;
1195                 break;
1196         }
1197
1198         /* initialize the wol settings based on the eeprom settings */
1199         adapter->wol = adapter->eeprom_wol;
1200         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1201
1202         /* Auto detect PHY address */
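        /* Try each of the 32 possible MDIO addresses: a PHY_ID2 readback of
         * 0x0000 or 0xFFFF means nothing answered at that address; fail only
         * after the last address (31) comes up empty. */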
1203         if (hw->mac_type == e1000_ce4100) {
1204                 for (i = 0; i < 32; i++) {
1205                         hw->phy_addr = i;
1206                         e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1207                         if (tmp == 0 || tmp == 0xFF) {
1208                                 if (i == 31)
1209                                         goto err_eeprom;
1210                                 continue;
1211                         } else
1212                                 break;
1213                 }
1214         }
1215
1216         /* reset the hardware with the new settings */
1217         e1000_reset(adapter);
1218
1219         strcpy(netdev->name, "eth%d");
1220         err = register_netdev(netdev);
1221         if (err)
1222                 goto err_register;
1223
1224         e1000_vlan_mode(netdev, netdev->features);
1225
1226         /* print bus type/speed/width info */
1227         e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1228                ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1229                ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1230                 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1231                 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1232                 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1233                ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1234                netdev->dev_addr);
1235
1236         /* carrier off reporting is important to ethtool even BEFORE open */
1237         netif_carrier_off(netdev);
1238
1239         e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1240
1241         cards_found++;
1242         return 0;
1243
1244 err_register:
1245 err_eeprom:
1246         e1000_phy_hw_reset(hw);
1247
1248         if (hw->flash_address)
1249                 iounmap(hw->flash_address);
1250         kfree(adapter->tx_ring);
1251         kfree(adapter->rx_ring);
1252 err_dma:
1253 err_sw_init:
1254 err_mdio_ioremap:
1255         iounmap(ce4100_gbe_mdio_base_virt);
1256         iounmap(hw->hw_addr);
1257 err_ioremap:
1258         free_netdev(netdev);
1259 err_alloc_etherdev:
1260         pci_release_selected_regions(pdev, bars);
1261 err_pci_reg:
1262         pci_disable_device(pdev);
1263         return err;
1264 }
1265
1266 /**
1267  * e1000_remove - Device Removal Routine
1268  * @pdev: PCI device information struct
1269  *
1270  * e1000_remove is called by the PCI subsystem to alert the driver
1271  * that it should release a PCI device.  This could be caused by a
1272  * Hot-Plug event, or because the driver is going to be removed from
1273  * memory.
1274  **/
1275
1276 static void __devexit e1000_remove(struct pci_dev *pdev)
1277 {
1278         struct net_device *netdev = pci_get_drvdata(pdev);
1279         struct e1000_adapter *adapter = netdev_priv(netdev);
1280         struct e1000_hw *hw = &adapter->hw;
1281
1282         set_bit(__E1000_DOWN, &adapter->flags);
1283         del_timer_sync(&adapter->tx_fifo_stall_timer);
1284         del_timer_sync(&adapter->watchdog_timer);
1285         del_timer_sync(&adapter->phy_info_timer);
1286
1287         cancel_work_sync(&adapter->reset_task);
1288
1289         e1000_release_manageability(adapter);
1290
1291         unregister_netdev(netdev);
1292
1293         e1000_phy_hw_reset(hw);
1294
1295         kfree(adapter->tx_ring);
1296         kfree(adapter->rx_ring);
1297
1298         iounmap(hw->hw_addr);
1299         if (hw->flash_address)
1300                 iounmap(hw->flash_address);
1301         pci_release_selected_regions(pdev, adapter->bars);
1302
1303         free_netdev(netdev);
1304
1305         pci_disable_device(pdev);
1306 }
1307
1308 /**
1309  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1310  * @adapter: board private structure to initialize
1311  *
1312  * e1000_sw_init initializes the Adapter private data structure.
1313  * e1000_init_hw_struct MUST be called before this function
1314  **/
1315
1316 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1317 {
1318         adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1319
1320         adapter->num_tx_queues = 1;
1321         adapter->num_rx_queues = 1;
1322
1323         if (e1000_alloc_queues(adapter)) {
1324                 e_err(probe, "Unable to allocate memory for queues\n");
1325                 return -ENOMEM;
1326         }
1327
1328         /* Explicitly disable IRQ since the NIC can be in any state. */
1329         e1000_irq_disable(adapter);
1330
1331         spin_lock_init(&adapter->stats_lock);
1332
1333         set_bit(__E1000_DOWN, &adapter->flags);
1334
1335         return 0;
1336 }
1337
1338 /**
1339  * e1000_alloc_queues - Allocate memory for all rings
1340  * @adapter: board private structure to initialize
1341  *
1342  * We allocate one ring per queue at run-time since we don't know the
1343  * number of queues at compile-time.
1344  **/
1345
1346 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
1347 {
1348         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1349                                    sizeof(struct e1000_tx_ring), GFP_KERNEL);
1350         if (!adapter->tx_ring)
1351                 return -ENOMEM;
1352
1353         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1354                                    sizeof(struct e1000_rx_ring), GFP_KERNEL);
1355         if (!adapter->rx_ring) {
1356                 kfree(adapter->tx_ring);
1357                 return -ENOMEM;
1358         }
1359
1360         return E1000_SUCCESS;
1361 }
1362
1363 /**
1364  * e1000_open - Called when a network interface is made active
1365  * @netdev: network interface device structure
1366  *
1367  * Returns 0 on success, negative value on failure
1368  *
1369  * The open entry point is called when a network interface is made
1370  * active by the system (IFF_UP).  At this point all resources needed
1371  * for transmit and receive operations are allocated, the interrupt
1372  * handler is registered with the OS, the watchdog timer is started,
1373  * and the stack is notified that the interface is ready.
1374  **/
1375
1376 static int e1000_open(struct net_device *netdev)
1377 {
1378         struct e1000_adapter *adapter = netdev_priv(netdev);
1379         struct e1000_hw *hw = &adapter->hw;
1380         int err;
1381
1382         /* disallow open during test */
1383         if (test_bit(__E1000_TESTING, &adapter->flags))
1384                 return -EBUSY;
1385
1386         netif_carrier_off(netdev);
1387
1388         /* allocate transmit descriptors */
1389         err = e1000_setup_all_tx_resources(adapter);
1390         if (err)
1391                 goto err_setup_tx;
1392
1393         /* allocate receive descriptors */
1394         err = e1000_setup_all_rx_resources(adapter);
1395         if (err)
1396                 goto err_setup_rx;
1397
1398         e1000_power_up_phy(adapter);
1399
1400         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1401         if ((hw->mng_cookie.status &
1402                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1403                 e1000_update_mng_vlan(adapter);
1404         }
1405
1406         /* before we allocate an interrupt, we must be ready to handle it.
1407          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1408          * as soon as we call request_irq, so we have to set up our
1409          * clean_rx handler before we do so.  */
1410         e1000_configure(adapter);
1411
1412         err = e1000_request_irq(adapter);
1413         if (err)
1414                 goto err_req_irq;
1415
1416         /* From here on the code is the same as e1000_up() */
1417         clear_bit(__E1000_DOWN, &adapter->flags);
1418
1419         napi_enable(&adapter->napi);
1420
1421         e1000_irq_enable(adapter);
1422
1423         netif_start_queue(netdev);
1424
1425         /* fire a link status change interrupt to start the watchdog */
1426         ew32(ICS, E1000_ICS_LSC);
1427
1428         return E1000_SUCCESS;
1429
1430 err_req_irq:
1431         e1000_power_down_phy(adapter);
1432         e1000_free_all_rx_resources(adapter);
1433 err_setup_rx:
1434         e1000_free_all_tx_resources(adapter);
1435 err_setup_tx:
1436         e1000_reset(adapter);
1437
1438         return err;
1439 }
1440
1441 /**
1442  * e1000_close - Disables a network interface
1443  * @netdev: network interface device structure
1444  *
1445  * Returns 0, this is not allowed to fail
1446  *
1447  * The close entry point is called when an interface is de-activated
1448  * by the OS.  The hardware is still under the driver's control, but
1449  * needs to be disabled.  A global MAC reset is issued to stop the
1450  * hardware, and all transmit and receive resources are freed.
1451  **/
1452
1453 static int e1000_close(struct net_device *netdev)
1454 {
1455         struct e1000_adapter *adapter = netdev_priv(netdev);
1456         struct e1000_hw *hw = &adapter->hw;
1457
1458         WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1459         e1000_down(adapter);
1460         e1000_power_down_phy(adapter);
1461         e1000_free_irq(adapter);
1462
1463         e1000_free_all_tx_resources(adapter);
1464         e1000_free_all_rx_resources(adapter);
1465
1466         /* kill the manageability VLAN ID if supported, but not if a VLAN
1467          * with the same ID is registered on the host OS (let 8021q kill it) */
1468         if ((hw->mng_cookie.status &
1469                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1470              !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1471                 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1472         }
1473
1474         return 0;
1475 }
1476
1477 /**
1478  * e1000_check_64k_bound - check that memory doesn't cross a 64kB boundary
1479  * @adapter: address of board private structure
1480  * @start: address of beginning of memory
1481  * @len: length of memory
1482  **/
1483 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1484                                   unsigned long len)
1485 {
1486         struct e1000_hw *hw = &adapter->hw;
1487         unsigned long begin = (unsigned long)start;
1488         unsigned long end = begin + len;
1489
1490         /* First-rev 82545 and 82546 must not allow any memory
1491          * write location to cross a 64k boundary due to errata 23 */
1492         if (hw->mac_type == e1000_82545 ||
1493             hw->mac_type == e1000_ce4100 ||
1494             hw->mac_type == e1000_82546) {
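                     /* begin ^ (end - 1) has a bit set at or above bit 16 only
                      * when the first and last byte of the buffer fall in
                      * different 64 KiB regions, i.e. only when the buffer
                      * crosses a 64 KiB boundary.
                      */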
1495                 return ((begin ^ (end - 1)) >> 16) == 0;
1496         }
1497
1498         return true;
1499 }
1500
1501 /**
1502  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1503  * @adapter: board private structure
1504  * @txdr:    tx descriptor ring (for a specific queue) to setup
1505  *
1506  * Return 0 on success, negative on failure
1507  **/
1508
1509 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1510                                     struct e1000_tx_ring *txdr)
1511 {
1512         struct pci_dev *pdev = adapter->pdev;
1513         int size;
1514
1515         size = sizeof(struct e1000_buffer) * txdr->count;
1516         txdr->buffer_info = vzalloc(size);
1517         if (!txdr->buffer_info) {
1518                 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1519                       "ring\n");
1520                 return -ENOMEM;
1521         }
1522
1523         /* round up to nearest 4K */
1524
1525         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1526         txdr->size = ALIGN(txdr->size, 4096);
1527
1528         txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1529                                         GFP_KERNEL);
1530         if (!txdr->desc) {
1531 setup_tx_desc_die:
1532                 vfree(txdr->buffer_info);
1533                 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1534                       "ring\n");
1535                 return -ENOMEM;
1536         }
1537
1538         /* Fix for errata 23, can't cross 64kB boundary */
1539         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1540                 void *olddesc = txdr->desc;
1541                 dma_addr_t olddma = txdr->dma;
1542                 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1543                       txdr->size, txdr->desc);
1544                 /* Try again, without freeing the previous */
1545                 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1546                                                 &txdr->dma, GFP_KERNEL);
1547                 /* Failed allocation, critical failure */
1548                 if (!txdr->desc) {
1549                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1550                                           olddma);
1551                         goto setup_tx_desc_die;
1552                 }
1553
1554                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1555                         /* give up */
1556                         dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1557                                           txdr->dma);
1558                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1559                                           olddma);
1560                         e_err(probe, "Unable to allocate aligned memory "
1561                               "for the transmit descriptor ring\n");
1562                         vfree(txdr->buffer_info);
1563                         return -ENOMEM;
1564                 } else {
1565                         /* Free old allocation, new allocation was successful */
1566                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1567                                           olddma);
1568                 }
1569         }
1570         memset(txdr->desc, 0, txdr->size);
1571
1572         txdr->next_to_use = 0;
1573         txdr->next_to_clean = 0;
1574
1575         return 0;
1576 }
1577
1578 /**
1579  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1580  *                                (Descriptors) for all queues
1581  * @adapter: board private structure
1582  *
1583  * Return 0 on success, negative on failure
1584  **/
1585
1586 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1587 {
1588         int i, err = 0;
1589
1590         for (i = 0; i < adapter->num_tx_queues; i++) {
1591                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1592                 if (err) {
1593                         e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1594                         for (i--; i >= 0; i--)
1595                                 e1000_free_tx_resources(adapter,
1596                                                         &adapter->tx_ring[i]);
1597                         break;
1598                 }
1599         }
1600
1601         return err;
1602 }
1603
1604 /**
1605  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1606  * @adapter: board private structure
1607  *
1608  * Configure the Tx unit of the MAC after a reset.
1609  **/
1610
1611 static void e1000_configure_tx(struct e1000_adapter *adapter)
1612 {
1613         u64 tdba;
1614         struct e1000_hw *hw = &adapter->hw;
1615         u32 tdlen, tctl, tipg;
1616         u32 ipgr1, ipgr2;
1617
1618         /* Setup the HW Tx Head and Tail descriptor pointers */
1619
1620         switch (adapter->num_tx_queues) {
1621         case 1:
1622         default:
1623                 tdba = adapter->tx_ring[0].dma;
1624                 tdlen = adapter->tx_ring[0].count *
1625                         sizeof(struct e1000_tx_desc);
1626                 ew32(TDLEN, tdlen);
1627                 ew32(TDBAH, (tdba >> 32));
1628                 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1629                 ew32(TDT, 0);
1630                 ew32(TDH, 0);
1631                 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
                             E1000_TDH : E1000_82542_TDH);
1632                 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
                             E1000_TDT : E1000_82542_TDT);
1633                 break;
1634         }
1635
1636         /* Set the default values for the Tx Inter Packet Gap timer */
1637         if ((hw->media_type == e1000_media_type_fiber ||
1638              hw->media_type == e1000_media_type_internal_serdes))
1639                 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1640         else
1641                 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1642
1643         switch (hw->mac_type) {
1644         case e1000_82542_rev2_0:
1645         case e1000_82542_rev2_1:
1646                 tipg = DEFAULT_82542_TIPG_IPGT;
1647                 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1648                 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1649                 break;
1650         default:
1651                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1652                 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1653                 break;
1654         }
1655         tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1656         tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1657         ew32(TIPG, tipg);
1658
1659         /* Set the Tx Interrupt Delay register */
1660
1661         ew32(TIDV, adapter->tx_int_delay);
1662         if (hw->mac_type >= e1000_82540)
1663                 ew32(TADV, adapter->tx_abs_int_delay);
1664
1665         /* Program the Transmit Control Register */
1666
1667         tctl = er32(TCTL);
1668         tctl &= ~E1000_TCTL_CT;
1669         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1670                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1671
1672         e1000_config_collision_dist(hw);
1673
1674         /* Setup Transmit Descriptor Settings for eop descriptor */
1675         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1676
1677         /* only set IDE if we are delaying interrupts using the timers */
1678         if (adapter->tx_int_delay)
1679                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1680
1681         if (hw->mac_type < e1000_82543)
1682                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1683         else
1684                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1685
1686         /* Cache whether we're an 82544 running in PCI-X, because we'll
1687          * need this to apply a workaround later in the send path. */
1688         if (hw->mac_type == e1000_82544 &&
1689             hw->bus_type == e1000_bus_type_pcix)
1690                 adapter->pcix_82544 = 1;
1691
1692         ew32(TCTL, tctl);
1693
1694 }
1695
1696 /**
1697  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1698  * @adapter: board private structure
1699  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1700  *
1701  * Returns 0 on success, negative on failure
1702  **/
1703
1704 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1705                                     struct e1000_rx_ring *rxdr)
1706 {
1707         struct pci_dev *pdev = adapter->pdev;
1708         int size, desc_len;
1709
1710         size = sizeof(struct e1000_buffer) * rxdr->count;
1711         rxdr->buffer_info = vzalloc(size);
1712         if (!rxdr->buffer_info) {
1713                 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1714                       "ring\n");
1715                 return -ENOMEM;
1716         }
1717
1718         desc_len = sizeof(struct e1000_rx_desc);
1719
1720         /* Round up to nearest 4K */
1721
1722         rxdr->size = rxdr->count * desc_len;
1723         rxdr->size = ALIGN(rxdr->size, 4096);
1724
1725         rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1726                                         GFP_KERNEL);
1727
1728         if (!rxdr->desc) {
1729                 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1730                       "ring\n");
1731 setup_rx_desc_die:
1732                 vfree(rxdr->buffer_info);
1733                 return -ENOMEM;
1734         }
1735
1736         /* Fix for errata 23, can't cross 64kB boundary */
1737         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1738                 void *olddesc = rxdr->desc;
1739                 dma_addr_t olddma = rxdr->dma;
1740                 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1741                       rxdr->size, rxdr->desc);
1742                 /* Try again, without freeing the previous */
1743                 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1744                                                 &rxdr->dma, GFP_KERNEL);
1745                 /* Failed allocation, critical failure */
1746                 if (!rxdr->desc) {
1747                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1748                                           olddma);
1749                         e_err(probe, "Unable to allocate memory for the Rx "
1750                               "descriptor ring\n");
1751                         goto setup_rx_desc_die;
1752                 }
1753
1754                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1755                         /* give up */
1756                         dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1757                                           rxdr->dma);
1758                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1759                                           olddma);
1760                         e_err(probe, "Unable to allocate aligned memory for "
1761                               "the Rx descriptor ring\n");
1762                         goto setup_rx_desc_die;
1763                 } else {
1764                         /* Free old allocation, new allocation was successful */
1765                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1766                                           olddma);
1767                 }
1768         }
1769         memset(rxdr->desc, 0, rxdr->size);
1770
1771         rxdr->next_to_clean = 0;
1772         rxdr->next_to_use = 0;
1773         rxdr->rx_skb_top = NULL;
1774
1775         return 0;
1776 }
1777
1778 /**
1779  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1780  *                                (Descriptors) for all queues
1781  * @adapter: board private structure
1782  *
1783  * Return 0 on success, negative on failure
1784  **/
1785
1786 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1787 {
1788         int i, err = 0;
1789
1790         for (i = 0; i < adapter->num_rx_queues; i++) {
1791                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1792                 if (err) {
1793                         e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1794                         for (i--; i >= 0; i--)
1795                                 e1000_free_rx_resources(adapter,
1796                                                         &adapter->rx_ring[i]);
1797                         break;
1798                 }
1799         }
1800
1801         return err;
1802 }
1803
1804 /**
1805  * e1000_setup_rctl - configure the receive control registers
1806  * @adapter: Board private structure
1807  **/
1808 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1809 {
1810         struct e1000_hw *hw = &adapter->hw;
1811         u32 rctl;
1812
1813         rctl = er32(RCTL);
1814
1815         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1816
1817         rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
1818                 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1819                 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1820
1821         if (hw->tbi_compatibility_on == 1)
1822                 rctl |= E1000_RCTL_SBP;
1823         else
1824                 rctl &= ~E1000_RCTL_SBP;
1825
1826         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1827                 rctl &= ~E1000_RCTL_LPE;
1828         else
1829                 rctl |= E1000_RCTL_LPE;
1830
1831         /* Setup buffer sizes */
1832         rctl &= ~E1000_RCTL_SZ_4096;
1833         rctl |= E1000_RCTL_BSEX;
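             /* Setting BSEX here presumably enables the extended buffer-size
              * encodings used for the 4k/8k/16k cases below; the default
              * 2048-byte case clears it again and uses the standard encoding.
              */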
1834         switch (adapter->rx_buffer_len) {
1835         case E1000_RXBUFFER_2048:
1836         default:
1837                 rctl |= E1000_RCTL_SZ_2048;
1838                 rctl &= ~E1000_RCTL_BSEX;
1839                 break;
1840         case E1000_RXBUFFER_4096:
1841                 rctl |= E1000_RCTL_SZ_4096;
1842                 break;
1843         case E1000_RXBUFFER_8192:
1844                 rctl |= E1000_RCTL_SZ_8192;
1845                 break;
1846         case E1000_RXBUFFER_16384:
1847                 rctl |= E1000_RCTL_SZ_16384;
1848                 break;
1849         }
1850
1851         ew32(RCTL, rctl);
1852 }
1853
1854 /**
1855  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1856  * @adapter: board private structure
1857  *
1858  * Configure the Rx unit of the MAC after a reset.
1859  **/
1860
1861 static void e1000_configure_rx(struct e1000_adapter *adapter)
1862 {
1863         u64 rdba;
1864         struct e1000_hw *hw = &adapter->hw;
1865         u32 rdlen, rctl, rxcsum;
1866
1867         if (adapter->netdev->mtu > ETH_DATA_LEN) {
1868                 rdlen = adapter->rx_ring[0].count *
1869                         sizeof(struct e1000_rx_desc);
1870                 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1871                 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1872         } else {
1873                 rdlen = adapter->rx_ring[0].count *
1874                         sizeof(struct e1000_rx_desc);
1875                 adapter->clean_rx = e1000_clean_rx_irq;
1876                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1877         }
1878
1879         /* disable receives while setting up the descriptors */
1880         rctl = er32(RCTL);
1881         ew32(RCTL, rctl & ~E1000_RCTL_EN);
1882
1883         /* set the Receive Delay Timer Register */
1884         ew32(RDTR, adapter->rx_int_delay);
1885
1886         if (hw->mac_type >= e1000_82540) {
1887                 ew32(RADV, adapter->rx_abs_int_delay);
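                     /* The divisor suggests the ITR register counts in 256 ns
                      * units: 10^9 / (itr * 256) converts an interrupts-per-
                      * second target (adapter->itr) into the inter-interrupt
                      * interval the hardware expects.
                      */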
1888                 if (adapter->itr_setting != 0)
1889                         ew32(ITR, 1000000000 / (adapter->itr * 256));
1890         }
1891
1892         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1893          * the Base and Length of the Rx Descriptor Ring */
1894         switch (adapter->num_rx_queues) {
1895         case 1:
1896         default:
1897                 rdba = adapter->rx_ring[0].dma;
1898                 ew32(RDLEN, rdlen);
1899                 ew32(RDBAH, (rdba >> 32));
1900                 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1901                 ew32(RDT, 0);
1902                 ew32(RDH, 0);
1903                 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
                             E1000_RDH : E1000_82542_RDH);
1904                 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
                             E1000_RDT : E1000_82542_RDT);
1905                 break;
1906         }
1907
1908         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1909         if (hw->mac_type >= e1000_82543) {
1910                 rxcsum = er32(RXCSUM);
1911                 if (adapter->rx_csum)
1912                         rxcsum |= E1000_RXCSUM_TUOFL;
1913                 else
1914                         /* don't need to clear IPPCSE as it defaults to 0 */
1915                         rxcsum &= ~E1000_RXCSUM_TUOFL;
1916                 ew32(RXCSUM, rxcsum);
1917         }
1918
1919         /* Enable Receives */
1920         ew32(RCTL, rctl);
1921 }
1922
1923 /**
1924  * e1000_free_tx_resources - Free Tx Resources per Queue
1925  * @adapter: board private structure
1926  * @tx_ring: Tx descriptor ring for a specific queue
1927  *
1928  * Free all transmit software resources
1929  **/
1930
1931 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1932                                     struct e1000_tx_ring *tx_ring)
1933 {
1934         struct pci_dev *pdev = adapter->pdev;
1935
1936         e1000_clean_tx_ring(adapter, tx_ring);
1937
1938         vfree(tx_ring->buffer_info);
1939         tx_ring->buffer_info = NULL;
1940
1941         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1942                           tx_ring->dma);
1943
1944         tx_ring->desc = NULL;
1945 }
1946
1947 /**
1948  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1949  * @adapter: board private structure
1950  *
1951  * Free all transmit software resources
1952  **/
1953
1954 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1955 {
1956         int i;
1957
1958         for (i = 0; i < adapter->num_tx_queues; i++)
1959                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1960 }
1961
1962 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1963                                              struct e1000_buffer *buffer_info)
1964 {
1965         if (buffer_info->dma) {
1966                 if (buffer_info->mapped_as_page)
1967                         dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1968                                        buffer_info->length, DMA_TO_DEVICE);
1969                 else
1970                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1971                                          buffer_info->length,
1972                                          DMA_TO_DEVICE);
1973                 buffer_info->dma = 0;
1974         }
1975         if (buffer_info->skb) {
1976                 dev_kfree_skb_any(buffer_info->skb);
1977                 buffer_info->skb = NULL;
1978         }
1979         buffer_info->time_stamp = 0;
1980         /* buffer_info must be completely set up in the transmit path */
1981 }
1982
1983 /**
1984  * e1000_clean_tx_ring - Free Tx Buffers
1985  * @adapter: board private structure
1986  * @tx_ring: ring to be cleaned
1987  **/
1988
1989 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
1990                                 struct e1000_tx_ring *tx_ring)
1991 {
1992         struct e1000_hw *hw = &adapter->hw;
1993         struct e1000_buffer *buffer_info;
1994         unsigned long size;
1995         unsigned int i;
1996
1997         /* Free all the Tx ring sk_buffs */
1998
1999         for (i = 0; i < tx_ring->count; i++) {
2000                 buffer_info = &tx_ring->buffer_info[i];
2001                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2002         }
2003
2004         size = sizeof(struct e1000_buffer) * tx_ring->count;
2005         memset(tx_ring->buffer_info, 0, size);
2006
2007         /* Zero out the descriptor ring */
2008
2009         memset(tx_ring->desc, 0, tx_ring->size);
2010
2011         tx_ring->next_to_use = 0;
2012         tx_ring->next_to_clean = 0;
2013         tx_ring->last_tx_tso = 0;
2014
2015         writel(0, hw->hw_addr + tx_ring->tdh);
2016         writel(0, hw->hw_addr + tx_ring->tdt);
2017 }
2018
2019 /**
2020  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2021  * @adapter: board private structure
2022  **/
2023
2024 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2025 {
2026         int i;
2027
2028         for (i = 0; i < adapter->num_tx_queues; i++)
2029                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2030 }
2031
2032 /**
2033  * e1000_free_rx_resources - Free Rx Resources
2034  * @adapter: board private structure
2035  * @rx_ring: ring to clean the resources from
2036  *
2037  * Free all receive software resources
2038  **/
2039
2040 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2041                                     struct e1000_rx_ring *rx_ring)
2042 {
2043         struct pci_dev *pdev = adapter->pdev;
2044
2045         e1000_clean_rx_ring(adapter, rx_ring);
2046
2047         vfree(rx_ring->buffer_info);
2048         rx_ring->buffer_info = NULL;
2049
2050         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2051                           rx_ring->dma);
2052
2053         rx_ring->desc = NULL;
2054 }
2055
2056 /**
2057  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2058  * @adapter: board private structure
2059  *
2060  * Free all receive software resources
2061  **/
2062
2063 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2064 {
2065         int i;
2066
2067         for (i = 0; i < adapter->num_rx_queues; i++)
2068                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2069 }
2070
2071 /**
2072  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2073  * @adapter: board private structure
2074  * @rx_ring: ring to free buffers from
2075  **/
2076
2077 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2078                                 struct e1000_rx_ring *rx_ring)
2079 {
2080         struct e1000_hw *hw = &adapter->hw;
2081         struct e1000_buffer *buffer_info;
2082         struct pci_dev *pdev = adapter->pdev;
2083         unsigned long size;
2084         unsigned int i;
2085
2086         /* Free all the Rx ring sk_buffs */
2087         for (i = 0; i < rx_ring->count; i++) {
2088                 buffer_info = &rx_ring->buffer_info[i];
2089                 if (buffer_info->dma &&
2090                     adapter->clean_rx == e1000_clean_rx_irq) {
2091                         dma_unmap_single(&pdev->dev, buffer_info->dma,
2092                                          buffer_info->length,
2093                                          DMA_FROM_DEVICE);
2094                 } else if (buffer_info->dma &&
2095                            adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2096                         dma_unmap_page(&pdev->dev, buffer_info->dma,
2097                                        buffer_info->length,
2098                                        DMA_FROM_DEVICE);
2099                 }
2100
2101                 buffer_info->dma = 0;
2102                 if (buffer_info->page) {
2103                         put_page(buffer_info->page);
2104                         buffer_info->page = NULL;
2105                 }
2106                 if (buffer_info->skb) {
2107                         dev_kfree_skb(buffer_info->skb);
2108                         buffer_info->skb = NULL;
2109                 }
2110         }
2111
2112         /* there may also be some cached data from a chained receive */
2113         if (rx_ring->rx_skb_top) {
2114                 dev_kfree_skb(rx_ring->rx_skb_top);
2115                 rx_ring->rx_skb_top = NULL;
2116         }
2117
2118         size = sizeof(struct e1000_buffer) * rx_ring->count;
2119         memset(rx_ring->buffer_info, 0, size);
2120
2121         /* Zero out the descriptor ring */
2122         memset(rx_ring->desc, 0, rx_ring->size);
2123
2124         rx_ring->next_to_clean = 0;
2125         rx_ring->next_to_use = 0;
2126
2127         writel(0, hw->hw_addr + rx_ring->rdh);
2128         writel(0, hw->hw_addr + rx_ring->rdt);
2129 }
2130
2131 /**
2132  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2133  * @adapter: board private structure
2134  **/
2135
2136 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2137 {
2138         int i;
2139
2140         for (i = 0; i < adapter->num_rx_queues; i++)
2141                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2142 }
2143
2144 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2145  * and Memory Write and Invalidate (MWI) disabled for certain operations
2146  */
2147 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2148 {
2149         struct e1000_hw *hw = &adapter->hw;
2150         struct net_device *netdev = adapter->netdev;
2151         u32 rctl;
2152
2153         e1000_pci_clear_mwi(hw);
2154
2155         rctl = er32(RCTL);
2156         rctl |= E1000_RCTL_RST;
2157         ew32(RCTL, rctl);
2158         E1000_WRITE_FLUSH();
2159         mdelay(5);
2160
2161         if (netif_running(netdev))
2162                 e1000_clean_all_rx_rings(adapter);
2163 }
2164
2165 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2166 {
2167         struct e1000_hw *hw = &adapter->hw;
2168         struct net_device *netdev = adapter->netdev;
2169         u32 rctl;
2170
2171         rctl = er32(RCTL);
2172         rctl &= ~E1000_RCTL_RST;
2173         ew32(RCTL, rctl);
2174         E1000_WRITE_FLUSH();
2175         mdelay(5);
2176
2177         if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2178                 e1000_pci_set_mwi(hw);
2179
2180         if (netif_running(netdev)) {
2181                 /* No need to loop, because 82542 supports only 1 queue */
2182                 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2183                 e1000_configure_rx(adapter);
2184                 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2185         }
2186 }
2187
2188 /**
2189  * e1000_set_mac - Change the Ethernet Address of the NIC
2190  * @netdev: network interface device structure
2191  * @p: pointer to an address structure
2192  *
2193  * Returns 0 on success, negative on failure
2194  **/
2195
2196 static int e1000_set_mac(struct net_device *netdev, void *p)
2197 {
2198         struct e1000_adapter *adapter = netdev_priv(netdev);
2199         struct e1000_hw *hw = &adapter->hw;
2200         struct sockaddr *addr = p;
2201
2202         if (!is_valid_ether_addr(addr->sa_data))
2203                 return -EADDRNOTAVAIL;
2204
2205         /* 82542 2.0 needs to be in reset to write receive address registers */
2206
2207         if (hw->mac_type == e1000_82542_rev2_0)
2208                 e1000_enter_82542_rst(adapter);
2209
2210         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2211         memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2212
2213         e1000_rar_set(hw, hw->mac_addr, 0);
2214
2215         if (hw->mac_type == e1000_82542_rev2_0)
2216                 e1000_leave_82542_rst(adapter);
2217
2218         return 0;
2219 }
2220
2221 /**
2222  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2223  * @netdev: network interface device structure
2224  *
2225  * The set_rx_mode entry point is called whenever the unicast or multicast
2226  * address lists or the network interface flags are updated. This routine is
2227  * responsible for configuring the hardware for proper unicast, multicast,
2228  * promiscuous mode, and all-multi behavior.
2229  **/
2230
2231 static void e1000_set_rx_mode(struct net_device *netdev)
2232 {
2233         struct e1000_adapter *adapter = netdev_priv(netdev);
2234         struct e1000_hw *hw = &adapter->hw;
2235         struct netdev_hw_addr *ha;
2236         bool use_uc = false;
2237         u32 rctl;
2238         u32 hash_value;
2239         int i, rar_entries = E1000_RAR_ENTRIES;
2240         int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2241         u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2242
2243         if (!mcarray) {
2244                 e_err(probe, "memory allocation failed\n");
2245                 return;
2246         }
2247
2248         /* Check for Promiscuous and All Multicast modes */
2249
2250         rctl = er32(RCTL);
2251
2252         if (netdev->flags & IFF_PROMISC) {
2253                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2254                 rctl &= ~E1000_RCTL_VFE;
2255         } else {
2256                 if (netdev->flags & IFF_ALLMULTI)
2257                         rctl |= E1000_RCTL_MPE;
2258                 else
2259                         rctl &= ~E1000_RCTL_MPE;
2260                 /* Enable VLAN filter if there is a VLAN */
2261                 if (e1000_vlan_used(adapter))
2262                         rctl |= E1000_RCTL_VFE;
2263         }
2264
2265         if (netdev_uc_count(netdev) > rar_entries - 1) {
2266                 rctl |= E1000_RCTL_UPE;
2267         } else if (!(netdev->flags & IFF_PROMISC)) {
2268                 rctl &= ~E1000_RCTL_UPE;
2269                 use_uc = true;
2270         }
2271
2272         ew32(RCTL, rctl);
2273
2274         /* 82542 2.0 needs to be in reset to write receive address registers */
2275
2276         if (hw->mac_type == e1000_82542_rev2_0)
2277                 e1000_enter_82542_rst(adapter);
2278
2279         /* load the first 14 addresses into the exact filters 1-14. Unicast
2280          * addresses take precedence to avoid disabling unicast filtering
2281          * when possible.
2282          *
2283          * RAR 0 is used for the station MAC address;
2284          * if there are fewer than 14 addresses, go ahead and clear the filters
2285          */
2286         i = 1;
2287         if (use_uc)
2288                 netdev_for_each_uc_addr(ha, netdev) {
2289                         if (i == rar_entries)
2290                                 break;
2291                         e1000_rar_set(hw, ha->addr, i++);
2292                 }
2293
2294         netdev_for_each_mc_addr(ha, netdev) {
2295                 if (i == rar_entries) {
2296                         /* load any remaining addresses into the hash table */
2297                         u32 hash_reg, hash_bit, mta;
2298                         hash_value = e1000_hash_mc_addr(hw, ha->addr);
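                             /* The hash picks one bit in the 4096-bit multicast
                              * table: the upper seven bits select one of the 128
                              * 32-bit MTA registers and the low five bits select
                              * the bit within that register.
                              */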
2299                         hash_reg = (hash_value >> 5) & 0x7F;
2300                         hash_bit = hash_value & 0x1F;
2301                         mta = (1 << hash_bit);
2302                         mcarray[hash_reg] |= mta;
2303                 } else {
2304                         e1000_rar_set(hw, ha->addr, i++);
2305                 }
2306         }
2307
2308         for (; i < rar_entries; i++) {
2309                 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2310                 E1000_WRITE_FLUSH();
2311                 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2312                 E1000_WRITE_FLUSH();
2313         }
2314
2315         /* write the whole hash table, from the bottom up, to both work
2316          * around stupid write-combining chipsets and avoid flushing each write */
2317         for (i = mta_reg_count - 1; i >= 0 ; i--) {
2318                 /*
2319                  * The 82544 has an erratum where writing odd
2320                  * offsets overwrites the previous even offset, but writing
2321                  * backwards over the range solves the issue by always
2322                  * writing the odd offset first
2323                  */
2324                 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2325         }
2326         E1000_WRITE_FLUSH();
2327
2328         if (hw->mac_type == e1000_82542_rev2_0)
2329                 e1000_leave_82542_rst(adapter);
2330
2331         kfree(mcarray);
2332 }
2333
2334 /* Need to wait a few seconds after link up to get diagnostic information from
2335  * the phy */
2336
2337 static void e1000_update_phy_info(unsigned long data)
2338 {
2339         struct e1000_adapter *adapter = (struct e1000_adapter *)data;
2340         schedule_work(&adapter->phy_info_task);
2341 }
2342
2343 static void e1000_update_phy_info_task(struct work_struct *work)
2344 {
2345         struct e1000_adapter *adapter = container_of(work,
2346                                                      struct e1000_adapter,
2347                                                      phy_info_task);
2348         struct e1000_hw *hw = &adapter->hw;
2349
2350         rtnl_lock();
2351         e1000_phy_get_info(hw, &adapter->phy_info);
2352         rtnl_unlock();
2353 }
2354
2355 /**
2356  * e1000_82547_tx_fifo_stall - Timer Call-back
2357  * @data: pointer to adapter cast into an unsigned long
2358  **/
2359 static void e1000_82547_tx_fifo_stall(unsigned long data)
2360 {
2361         struct e1000_adapter *adapter = (struct e1000_adapter *)data;
2362         schedule_work(&adapter->fifo_stall_task);
2363 }
2364
2365 /**
2366  * e1000_82547_tx_fifo_stall_task - task to complete work
2367  * @work: work struct contained inside adapter struct
2368  **/
2369 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2370 {
2371         struct e1000_adapter *adapter = container_of(work,
2372                                                      struct e1000_adapter,
2373                                                      fifo_stall_task);
2374         struct e1000_hw *hw = &adapter->hw;
2375         struct net_device *netdev = adapter->netdev;
2376         u32 tctl;
2377
2378         rtnl_lock();
2379         if (atomic_read(&adapter->tx_fifo_stall)) {
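                     /* TDH/TDT are the descriptor ring head/tail, and TDFH/TDFT
                      * (plus their saved copies) appear to track the on-chip Tx
                      * FIFO; only once both have drained is it safe to rewrite
                      * the FIFO pointers below.
                      */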
2380                 if ((er32(TDT) == er32(TDH)) &&
2381                    (er32(TDFT) == er32(TDFH)) &&
2382                    (er32(TDFTS) == er32(TDFHS))) {
2383                         tctl = er32(TCTL);
2384                         ew32(TCTL, tctl & ~E1000_TCTL_EN);
2385                         ew32(TDFT, adapter->tx_head_addr);
2386                         ew32(TDFH, adapter->tx_head_addr);
2387                         ew32(TDFTS, adapter->tx_head_addr);
2388                         ew32(TDFHS, adapter->tx_head_addr);
2389                         ew32(TCTL, tctl);
2390                         E1000_WRITE_FLUSH();
2391
2392                         adapter->tx_fifo_head = 0;
2393                         atomic_set(&adapter->tx_fifo_stall, 0);
2394                         netif_wake_queue(netdev);
2395                 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2396                         mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
2397                 }
2398         }
2399         rtnl_unlock();
2400 }
2401
2402 bool e1000_has_link(struct e1000_adapter *adapter)
2403 {
2404         struct e1000_hw *hw = &adapter->hw;
2405         bool link_active = false;
2406
2407         /* get_link_status is set on LSC (link status) interrupt or rx
2408          * sequence error interrupt (except on intel ce4100).
2409          * get_link_status will stay false until the
2410          * e1000_check_for_link establishes link for copper adapters
2411          * ONLY
2412          */
2413         switch (hw->media_type) {
2414         case e1000_media_type_copper:
2415                 if (hw->mac_type == e1000_ce4100)
2416                         hw->get_link_status = 1;
2417                 if (hw->get_link_status) {
2418                         e1000_check_for_link(hw);
2419                         link_active = !hw->get_link_status;
2420                 } else {
2421                         link_active = true;
2422                 }
2423                 break;
2424         case e1000_media_type_fiber:
2425                 e1000_check_for_link(hw);
2426                 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2427                 break;
2428         case e1000_media_type_internal_serdes:
2429                 e1000_check_for_link(hw);
2430                 link_active = hw->serdes_has_link;
2431                 break;
2432         default:
2433                 break;
2434         }
2435
2436         return link_active;
2437 }
2438
2439 /**
2440  * e1000_watchdog - Timer Call-back
2441  * @data: pointer to adapter cast into an unsigned long
2442  **/
2443 static void e1000_watchdog(unsigned long data)
2444 {
2445         struct e1000_adapter *adapter = (struct e1000_adapter *)data;
2446         struct e1000_hw *hw = &adapter->hw;
2447         struct net_device *netdev = adapter->netdev;
2448         struct e1000_tx_ring *txdr = adapter->tx_ring;
2449         u32 link, tctl;
2450
2451         link = e1000_has_link(adapter);
2452         if (netif_carrier_ok(netdev) && link)
2453                 goto link_up;
2454
2455         if (link) {
2456                 if (!netif_carrier_ok(netdev)) {
2457                         u32 ctrl;
2458                         bool txb2b = true;
2459                         /* update snapshot of PHY registers on LSC */
2460                         e1000_get_speed_and_duplex(hw,
2461                                                    &adapter->link_speed,
2462                                                    &adapter->link_duplex);
2463
2464                         ctrl = er32(CTRL);
2465                         pr_info("%s NIC Link is Up %d Mbps %s, "
2466                                 "Flow Control: %s\n",
2467                                 netdev->name,
2468                                 adapter->link_speed,
2469                                 adapter->link_duplex == FULL_DUPLEX ?
2470                                 "Full Duplex" : "Half Duplex",
2471                                 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2472                                 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2473                                 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2474                                 E1000_CTRL_TFCE) ? "TX" : "None")));
2475
2476                         /* adjust timeout factor according to speed/duplex */
2477                         adapter->tx_timeout_factor = 1;
2478                         switch (adapter->link_speed) {
2479                         case SPEED_10:
2480                                 txb2b = false;
2481                                 adapter->tx_timeout_factor = 16;
2482                                 break;
2483                         case SPEED_100:
2484                                 txb2b = false;
2485                                 /* maybe add some timeout factor ? */
2486                                 break;
2487                         }
2488
2489                         /* enable transmits in the hardware */
2490                         tctl = er32(TCTL);
2491                         tctl |= E1000_TCTL_EN;
2492                         ew32(TCTL, tctl);
2493
2494                         netif_carrier_on(netdev);
2495                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2496                                 mod_timer(&adapter->phy_info_timer,
2497                                           round_jiffies(jiffies + 2 * HZ));
2498                         adapter->smartspeed = 0;
2499                 }
2500         } else {
2501                 if (netif_carrier_ok(netdev)) {
2502                         adapter->link_speed = 0;
2503                         adapter->link_duplex = 0;
2504                         pr_info("%s NIC Link is Down\n",
2505                                 netdev->name);
2506                         netif_carrier_off(netdev);
2507
2508                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2509                                 mod_timer(&adapter->phy_info_timer,
2510                                           round_jiffies(jiffies + 2 * HZ));
2511                 }
2512
2513                 e1000_smartspeed(adapter);
2514         }
2515
2516 link_up:
2517         e1000_update_stats(adapter);
2518
2519         hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2520         adapter->tpt_old = adapter->stats.tpt;
2521         hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2522         adapter->colc_old = adapter->stats.colc;
2523
2524         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2525         adapter->gorcl_old = adapter->stats.gorcl;
2526         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2527         adapter->gotcl_old = adapter->stats.gotcl;
2528
2529         e1000_update_adaptive(hw);
2530
2531         if (!netif_carrier_ok(netdev)) {
2532                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2533                         /* We've lost link, so the controller stops DMA,
2534                          * but we've got queued Tx work that's never going
2535                          * to get done, so reset controller to flush Tx.
2536                          * (Do the reset outside of interrupt context). */
2537                         adapter->tx_timeout_count++;
2538                         schedule_work(&adapter->reset_task);
2539                         /* return immediately since reset is imminent */
2540                         return;
2541                 }
2542         }
2543
2544         /* Simple mode for Interrupt Throttle Rate (ITR) */
2545         if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2546                 /*
2547                  * Symmetric Tx/Rx gets a reduced ITR=2000;
2548                  * Total asymmetrical Tx or Rx gets ITR=8000;
2549                  * everyone else is between 2000-8000.
2550                  */
2551                 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2552                 u32 dif = (adapter->gotcl > adapter->gorcl ?
2553                             adapter->gotcl - adapter->gorcl :
2554                             adapter->gorcl - adapter->gotcl) / 10000;
2555                 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2556
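                     /* dif/goc is the Tx/Rx asymmetry ratio, which stays in 0..1
                      * because |gotcl - gorcl| <= gotcl + gorcl, so itr scales
                      * linearly from 2000 ints/s (symmetric) to 8000 ints/s
                      * (fully one-sided); the write below converts that rate
                      * into the ITR register's units as elsewhere in the driver.
                      */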
2557                 ew32(ITR, 1000000000 / (itr * 256));
2558         }
2559
2560         /* Cause software interrupt to ensure rx ring is cleaned */
2561         ew32(ICS, E1000_ICS_RXDMT0);
2562
2563         /* Force detection of hung controller every watchdog period */
2564         adapter->detect_tx_hung = true;
2565
2566         /* Reset the timer */
2567         if (!test_bit(__E1000_DOWN, &adapter->flags))
2568                 mod_timer(&adapter->watchdog_timer,
2569                           round_jiffies(jiffies + 2 * HZ));
2570 }
2571
2572 enum latency_range {
2573         lowest_latency = 0,
2574         low_latency = 1,
2575         bulk_latency = 2,
2576         latency_invalid = 255
2577 };
2578
2579 /**
2580  * e1000_update_itr - update the dynamic ITR value based on statistics
2581  * @adapter: pointer to adapter
2582  * @itr_setting: current adapter->itr
2583  * @packets: the number of packets during this measurement interval
2584  * @bytes: the number of bytes during this measurement interval
2585  *
2586  *      Stores a new ITR value based on packet and byte
2587  *      counts during the last interrupt.  The advantage of per-interrupt
2588  *      computation is faster updates and a more accurate ITR for the current
2589  *      traffic pattern.  Constants in this function were computed
2590  *      based on theoretical maximum wire speed, and thresholds were set based
2591  *      on testing data as well as attempting to minimize response time
2592  *      while increasing bulk throughput.
2593  *      This functionality is controlled by the InterruptThrottleRate module
2594  *      parameter (see e1000_param.c).
2595  **/
2596 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2597                                      u16 itr_setting, int packets, int bytes)
2598 {
2599         unsigned int retval = itr_setting;
2600         struct e1000_hw *hw = &adapter->hw;
2601
2602         if (unlikely(hw->mac_type < e1000_82540))
2603                 goto update_itr_done;
2604
2605         if (packets == 0)
2606                 goto update_itr_done;
2607
2608         switch (itr_setting) {
2609         case lowest_latency:
2610                 /* jumbo frames get bulk treatment */
2611                 if (bytes/packets > 8000)
2612                         retval = bulk_latency;
2613                 else if ((packets < 5) && (bytes > 512))
2614                         retval = low_latency;
2615                 break;
2616         case low_latency:  /* 50 usec aka 20000 ints/s */
2617                 if (bytes > 10000) {
2618                         /* jumbo frames need bulk latency setting */
2619                         if (bytes/packets > 8000)
2620                                 retval = bulk_latency;
2621                         else if ((packets < 10) || ((bytes/packets) > 1200))
2622                                 retval = bulk_latency;
2623                         else if ((packets > 35))
2624                                 retval = lowest_latency;
2625                 } else if (bytes/packets > 2000)
2626                         retval = bulk_latency;
2627                 else if (packets <= 2 && bytes < 512)
2628                         retval = lowest_latency;
2629                 break;
2630         case bulk_latency: /* 250 usec aka 4000 ints/s */
2631                 if (bytes > 25000) {
2632                         if (packets > 35)
2633                                 retval = low_latency;
2634                 } else if (bytes < 6000) {
2635                         retval = low_latency;
2636                 }
2637                 break;
2638         }
2639
2640 update_itr_done:
2641         return retval;
2642 }
2643
2644 static void e1000_set_itr(struct e1000_adapter *adapter)
2645 {
2646         struct e1000_hw *hw = &adapter->hw;
2647         u16 current_itr;
2648         u32 new_itr = adapter->itr;
2649
2650         if (unlikely(hw->mac_type < e1000_82540))
2651                 return;
2652
2653         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2654         if (unlikely(adapter->link_speed != SPEED_1000)) {
2655                 current_itr = 0;
2656                 new_itr = 4000;
2657                 goto set_itr_now;
2658         }
2659
2660         adapter->tx_itr = e1000_update_itr(adapter,
2661                                     adapter->tx_itr,
2662                                     adapter->total_tx_packets,
2663                                     adapter->total_tx_bytes);
2664         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2665         if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2666                 adapter->tx_itr = low_latency;
2667
2668         adapter->rx_itr = e1000_update_itr(adapter,
2669                                     adapter->rx_itr,
2670                                     adapter->total_rx_packets,
2671                                     adapter->total_rx_bytes);
2672         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2673         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2674                 adapter->rx_itr = low_latency;
2675
2676         current_itr = max(adapter->rx_itr, adapter->tx_itr);
2677
2678         switch (current_itr) {
2679         /* counts and packets in update_itr are dependent on these numbers */
2680         case lowest_latency:
2681                 new_itr = 70000;
2682                 break;
2683         case low_latency:
2684                 new_itr = 20000; /* aka hwitr = ~200 */
2685                 break;
2686         case bulk_latency:
2687                 new_itr = 4000;
2688                 break;
2689         default:
2690                 break;
2691         }
2692
2693 set_itr_now:
2694         if (new_itr != adapter->itr) {
2695                 /* this attempts to bias the interrupt rate towards Bulk
2696                  * by adding intermediate steps when interrupt rate is
2697                  * increasing */
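                     /* i.e. when the target rate is above the current one, move
                      * only part of the way there (current + target/4, capped at
                      * the target) so the interrupt rate ramps up gradually
                      * rather than jumping.
                      */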
2698                 new_itr = new_itr > adapter->itr ?
2699                              min(adapter->itr + (new_itr >> 2), new_itr) :
2700                              new_itr;
2701                 adapter->itr = new_itr;
2702                 ew32(ITR, 1000000000 / (new_itr * 256));
2703         }
2704 }
2705
2706 #define E1000_TX_FLAGS_CSUM             0x00000001
2707 #define E1000_TX_FLAGS_VLAN             0x00000002
2708 #define E1000_TX_FLAGS_TSO              0x00000004
2709 #define E1000_TX_FLAGS_IPV4             0x00000008
2710 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2711 #define E1000_TX_FLAGS_VLAN_SHIFT       16
2712
2713 static int e1000_tso(struct e1000_adapter *adapter,
2714                      struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2715 {
2716         struct e1000_context_desc *context_desc;
2717         struct e1000_buffer *buffer_info;
2718         unsigned int i;
2719         u32 cmd_length = 0;
2720         u16 ipcse = 0, tucse, mss;
2721         u8 ipcss, ipcso, tucss, tucso, hdr_len;
2722         int err;
2723
2724         if (skb_is_gso(skb)) {
2725                 if (skb_header_cloned(skb)) {
2726                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2727                         if (err)
2728                                 return err;
2729                 }
2730
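                     /* hdr_len is the complete MAC + IP + TCP header length that
                      * the hardware replicates in front of every segment; the
                      * remaining skb->len - hdr_len bytes are the TSO payload
                      * split into mss-sized chunks.
                      */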
2731                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2732                 mss = skb_shinfo(skb)->gso_size;
2733                 if (skb->protocol == htons(ETH_P_IP)) {
2734                         struct iphdr *iph = ip_hdr(skb);
2735                         iph->tot_len = 0;
2736                         iph->check = 0;
2737                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2738                                                                  iph->daddr, 0,
2739                                                                  IPPROTO_TCP,
2740                                                                  0);
2741                         cmd_length = E1000_TXD_CMD_IP;
2742                         ipcse = skb_transport_offset(skb) - 1;
2743                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2744                         ipv6_hdr(skb)->payload_len = 0;
2745                         tcp_hdr(skb)->check =
2746                                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2747                                                  &ipv6_hdr(skb)->daddr,
2748                                                  0, IPPROTO_TCP, 0);
2749                         ipcse = 0;
2750                 }
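                     /* These offsets feed the TSO context descriptor: ipcss,
                      * ipcso and ipcse give the start of the IP header, the
                      * location of its checksum field and its end, while tucss,
                      * tucso and tucse do the same for the TCP header (tucse = 0
                      * meaning "checksum through the end of the packet").
                      */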
2751                 ipcss = skb_network_offset(skb);
2752                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2753                 tucss = skb_transport_offset(skb);
2754                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2755                 tucse = 0;
2756
2757                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2758                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
2759
2760                 i = tx_ring->next_to_use;
2761                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2762                 buffer_info = &tx_ring->buffer_info[i];
2763
2764                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2765                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2766                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2767                 context_desc->upper_setup.tcp_fields.tucss = tucss;
2768                 context_desc->upper_setup.tcp_fields.tucso = tucso;
2769                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2770                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2771                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2772                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2773
2774                 buffer_info->time_stamp = jiffies;
2775                 buffer_info->next_to_watch = i;
2776
2777                 if (++i == tx_ring->count) i = 0;
2778                 tx_ring->next_to_use = i;
2779
2780                 return true;
2781         }
2782         return false;
2783 }
2784
2785 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2786                           struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2787 {
2788         struct e1000_context_desc *context_desc;
2789         struct e1000_buffer *buffer_info;
2790         unsigned int i;
2791         u8 css;
2792         u32 cmd_len = E1000_TXD_CMD_DEXT;
2793
2794         if (skb->ip_summed != CHECKSUM_PARTIAL)
2795                 return false;
2796
2797         switch (skb->protocol) {
2798         case cpu_to_be16(ETH_P_IP):
2799                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2800                         cmd_len |= E1000_TXD_CMD_TCP;
2801                 break;
2802         case cpu_to_be16(ETH_P_IPV6):
2803                 /* XXX not handling all IPV6 headers */
2804                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2805                         cmd_len |= E1000_TXD_CMD_TCP;
2806                 break;
2807         default:
2808                 if (unlikely(net_ratelimit()))
2809                         e_warn(drv, "checksum_partial proto=%x!\n",
2810                                skb->protocol);
2811                 break;
2812         }
2813
2814         css = skb_checksum_start_offset(skb);
2815
2816         i = tx_ring->next_to_use;
2817         buffer_info = &tx_ring->buffer_info[i];
2818         context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2819
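             /* For plain checksum offload only the TCP/UDP fields matter:
              * tucss is where the hardware starts summing, tucso (start +
              * csum_offset) is where it stores the result, and tucse = 0
              * means it sums through the end of the packet.
              */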
2820         context_desc->lower_setup.ip_config = 0;
2821         context_desc->upper_setup.tcp_fields.tucss = css;
2822         context_desc->upper_setup.tcp_fields.tucso =
2823                 css + skb->csum_offset;
2824         context_desc->upper_setup.tcp_fields.tucse = 0;
2825         context_desc->tcp_seg_setup.data = 0;
2826         context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2827
2828         buffer_info->time_stamp = jiffies;
2829         buffer_info->next_to_watch = i;
2830
2831         if (unlikely(++i == tx_ring->count)) i = 0;
2832         tx_ring->next_to_use = i;
2833
2834         return true;
2835 }
2836
2837 #define E1000_MAX_TXD_PWR       12
2838 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
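/* Each data descriptor can carry at most 1 << 12 = 4096 bytes of payload;
 * larger buffers are split across multiple descriptors in e1000_tx_map(). */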
2839
2840 static int e1000_tx_map(struct e1000_adapter *adapter,
2841                         struct e1000_tx_ring *tx_ring,
2842                         struct sk_buff *skb, unsigned int first,
2843                         unsigned int max_per_txd, unsigned int nr_frags,
2844                         unsigned int mss)
2845 {
2846         struct e1000_hw *hw = &adapter->hw;
2847         struct pci_dev *pdev = adapter->pdev;
2848         struct e1000_buffer *buffer_info;
2849         unsigned int len = skb_headlen(skb);
2850         unsigned int offset = 0, size, count = 0, i;
2851         unsigned int f, bytecount, segs;
2852
2853         i = tx_ring->next_to_use;
2854
2855         while (len) {
2856                 buffer_info = &tx_ring->buffer_info[i];
2857                 size = min(len, max_per_txd);
2858                 /* Workaround for Controller erratum --
2859                  * descriptor for non-tso packet in a linear SKB that follows a
2860                  * tso gets written back prematurely before the data is fully
2861                  * DMA'd to the controller */
2862                 if (!skb->data_len && tx_ring->last_tx_tso &&
2863                     !skb_is_gso(skb)) {
2864                         tx_ring->last_tx_tso = 0;
2865                         size -= 4;
2866                 }
2867
2868                 /* Workaround for premature desc write-backs
2869                  * in TSO mode.  Append 4-byte sentinel desc */
2870                 if (unlikely(mss && !nr_frags && size == len && size > 8))
2871                         size -= 4;
2872                 /* Workaround for errata 10, which applies
2873                  * to all controllers in PCI-X mode.
2874                  * The fix is to make sure that the first descriptor of a
2875                  * packet is smaller than 2048 - 16 - 16 (or 2016) bytes.
2876                  */
2877                 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2878                                 (size > 2015) && count == 0))
2879                         size = 2015;
2880
2881                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2882                  * terminating buffers within evenly-aligned dwords. */
2883                 if (unlikely(adapter->pcix_82544 &&
2884                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2885                    size > 4))
2886                         size -= 4;
2887
2888                 buffer_info->length = size;
2889                 /* set time_stamp *before* dma to help avoid a possible race */
2890                 buffer_info->time_stamp = jiffies;
2891                 buffer_info->mapped_as_page = false;
2892                 buffer_info->dma = dma_map_single(&pdev->dev,
2893                                                   skb->data + offset,
2894                                                   size, DMA_TO_DEVICE);
2895                 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2896                         goto dma_error;
2897                 buffer_info->next_to_watch = i;
2898
2899                 len -= size;
2900                 offset += size;
2901                 count++;
2902                 if (len) {
2903                         i++;
2904                         if (unlikely(i == tx_ring->count))
2905                                 i = 0;
2906                 }
2907         }
2908
2909         for (f = 0; f < nr_frags; f++) {
2910                 struct skb_frag_struct *frag;
2911
2912                 frag = &skb_shinfo(skb)->frags[f];
2913                 len = frag->size;
2914                 offset = frag->page_offset;
2915
2916                 while (len) {
2917                         i++;
2918                         if (unlikely(i == tx_ring->count))
2919                                 i = 0;
2920
2921                         buffer_info = &tx_ring->buffer_info[i];
2922                         size = min(len, max_per_txd);
2923                         /* Workaround for premature desc write-backs
2924                          * in TSO mode.  Append 4-byte sentinel desc */
2925                         if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2926                                 size -= 4;
2927                         /* Workaround for potential 82544 hang in PCI-X.
2928                          * Avoid terminating buffers within evenly-aligned
2929                          * dwords. */
2930                         if (unlikely(adapter->pcix_82544 &&
2931                             !((unsigned long)(page_to_phys(frag->page) + offset
2932                                               + size - 1) & 4) &&
2933                             size > 4))
2934                                 size -= 4;
2935
2936                         buffer_info->length = size;
2937                         buffer_info->time_stamp = jiffies;
2938                         buffer_info->mapped_as_page = true;
2939                         buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
2940                                                         offset, size,
2941                                                         DMA_TO_DEVICE);
2942                         if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2943                                 goto dma_error;
2944                         buffer_info->next_to_watch = i;
2945
2946                         len -= size;
2947                         offset += size;
2948                         count++;
2949                 }
2950         }
2951
2952         segs = skb_shinfo(skb)->gso_segs ?: 1;
2953         /* each TSO segment repeats the headers on the wire, so count those extra header bytes */
2954         bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2955
2956         tx_ring->buffer_info[i].skb = skb;
2957         tx_ring->buffer_info[i].segs = segs;
2958         tx_ring->buffer_info[i].bytecount = bytecount;
2959         tx_ring->buffer_info[first].next_to_watch = i;
2960
2961         return count;
2962
2963 dma_error:
2964         dev_err(&pdev->dev, "TX DMA map failed\n");
2965         buffer_info->dma = 0;
2966         if (count)
2967                 count--;
2968
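        /* Unwind: step backwards through the descriptors already set up for
         * this skb, unmapping and freeing each buffer_info along the way. */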
2969         while (count--) {
2970                 if (i==0)
2971                         i += tx_ring->count;
2972                 i--;
2973                 buffer_info = &tx_ring->buffer_info[i];
2974                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2975         }
2976
2977         return 0;
2978 }
2979
2980 static void e1000_tx_queue(struct e1000_adapter *adapter,
2981                            struct e1000_tx_ring *tx_ring, int tx_flags,
2982                            int count)
2983 {
2984         struct e1000_hw *hw = &adapter->hw;
2985         struct e1000_tx_desc *tx_desc = NULL;
2986         struct e1000_buffer *buffer_info;
2987         u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
2988         unsigned int i;
2989
2990         if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
2991                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
2992                              E1000_TXD_CMD_TSE;
2993                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
2994
2995                 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
2996                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
2997         }
2998
2999         if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3000                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3001                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3002         }
3003
3004         if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3005                 txd_lower |= E1000_TXD_CMD_VLE;
3006                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3007         }
3008
3009         i = tx_ring->next_to_use;
3010
3011         while (count--) {
3012                 buffer_info = &tx_ring->buffer_info[i];
3013                 tx_desc = E1000_TX_DESC(*tx_ring, i);
3014                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3015                 tx_desc->lower.data =
3016                         cpu_to_le32(txd_lower | buffer_info->length);
3017                 tx_desc->upper.data = cpu_to_le32(txd_upper);
3018                 if (unlikely(++i == tx_ring->count)) i = 0;
3019         }
3020
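        /* Only the last descriptor of the packet also gets the adapter-wide
         * command bits from txd_cmd (typically end-of-packet plus the
         * report-status/interrupt-delay settings chosen at init time). */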
3021         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3022
3023         /* Force memory writes to complete before letting h/w
3024          * know there are new descriptors to fetch.  (Only
3025          * applicable for weak-ordered memory model archs,
3026          * such as IA-64). */
3027         wmb();
3028
3029         tx_ring->next_to_use = i;
3030         writel(i, hw->hw_addr + tx_ring->tdt);
3031         /* We need this if more than one processor can write to our tail
3032          * at a time; it synchronizes IO on IA64/Altix systems. */
3033         mmiowb();
3034 }
3035
3036 /**
3037  * e1000_82547_fifo_workaround - avoid controller hang in half-duplex environment
3038  * The workaround is to avoid queuing a large packet that would span
3039  * the internal Tx FIFO ring boundary by notifying the stack to resend
3040  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3041  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3042  * to the beginning of the Tx FIFO.
3043  **/
3044
3045 #define E1000_FIFO_HDR                  0x10
3046 #define E1000_82547_PAD_LEN             0x3E0
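/* In the FIFO accounting below, each packet occupies its own length plus a
 * 16-byte header (E1000_FIFO_HDR), rounded up to a 16-byte boundary.
 * E1000_82547_PAD_LEN (0x3E0 = 992 bytes) is the guard band used when
 * deciding whether a packet must be stalled until the FIFO drains. */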
3047
3048 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3049                                        struct sk_buff *skb)
3050 {
3051         u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3052         u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3053
3054         skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3055
3056         if (adapter->link_duplex != HALF_DUPLEX)
3057                 goto no_fifo_stall_required;
3058
3059         if (atomic_read(&adapter->tx_fifo_stall))
3060                 return 1;
3061
3062         if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3063                 atomic_set(&adapter->tx_fifo_stall, 1);
3064                 return 1;
3065         }
3066
3067 no_fifo_stall_required:
3068         adapter->tx_fifo_head += skb_fifo_len;
3069         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3070                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3071         return 0;
3072 }
3073
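/* Stop the queue first, then re-check for free descriptors under the memory
 * barrier: the cleanup path may have freed ring entries between the caller's
 * check and netif_stop_queue(), in which case the queue is restarted at once. */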
3074 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3075 {
3076         struct e1000_adapter *adapter = netdev_priv(netdev);
3077         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3078
3079         netif_stop_queue(netdev);
3080         /* Herbert's original patch had:
3081          *  smp_mb__after_netif_stop_queue();
3082          * but since that doesn't exist yet, just open code it. */
3083         smp_mb();
3084
3085         /* We need to check again in a case another CPU has just
3086          * made room available. */
3087         if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3088                 return -EBUSY;
3089
3090         /* A reprieve! */
3091         netif_start_queue(netdev);
3092         ++adapter->restart_queue;
3093         return 0;
3094 }
3095
3096 static int e1000_maybe_stop_tx(struct net_device *netdev,
3097                                struct e1000_tx_ring *tx_ring, int size)
3098 {
3099         if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3100                 return 0;
3101         return __e1000_maybe_stop_tx(netdev, size);
3102 }
3103
3104 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 )
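/* TXD_USE_COUNT(S, X) gives a (slightly conservative) upper bound on the
 * number of descriptors needed for S bytes when one descriptor holds at most
 * 2^X bytes, e.g. with X = 12 a 9000-byte buffer needs (9000 >> 12) + 1 = 3. */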
3105 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3106                                     struct net_device *netdev)
3107 {
3108         struct e1000_adapter *adapter = netdev_priv(netdev);
3109         struct e1000_hw *hw = &adapter->hw;
3110         struct e1000_tx_ring *tx_ring;
3111         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3112         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3113         unsigned int tx_flags = 0;
3114         unsigned int len = skb_headlen(skb);
3115         unsigned int nr_frags;
3116         unsigned int mss;
3117         int count = 0;
3118         int tso;
3119         unsigned int f;
3120
3121         /* This goes back to the question of how to logically map a tx queue
3122          * to a flow.  Right now, performance is impacted slightly negatively
3123          * if using multiple tx queues.  If the stack breaks away from a
3124          * single qdisc implementation, we can look at this again. */
3125         tx_ring = adapter->tx_ring;
3126
3127         if (unlikely(skb->len <= 0)) {
3128                 dev_kfree_skb_any(skb);
3129                 return NETDEV_TX_OK;
3130         }
3131
3132         mss = skb_shinfo(skb)->gso_size;
3133         /* The controller does a simple calculation to
3134          * make sure there is enough room in the FIFO before
3135          * initiating the DMA for each buffer.  The calc assumes
3136          * ceil(buffer len / mss) <= 4.  To make sure we don't
3137          * overrun the FIFO, adjust the max buffer len if mss
3138          * drops. */
3139         if (mss) {
3140                 u8 hdr_len;
3141                 max_per_txd = min(mss << 2, max_per_txd);
3142                 max_txd_pwr = fls(max_per_txd) - 1;
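                /* fls(x) - 1 is floor(log2(x)), so max_txd_pwr matches the
                 * (possibly reduced) per-descriptor limit chosen above. */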
3143
3144                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3145                 if (skb->data_len && hdr_len == len) {
3146                         switch (hw->mac_type) {
3147                                 unsigned int pull_size;
3148                         case e1000_82544:
3149                                 /* Make sure we have room to chop off 4 bytes,
3150                                  * and that the end alignment will work out to
3151                                  * this hardware's requirements.
3152                                  * NOTE: this is a TSO-only workaround;
3153                                  * if the end byte alignment is not correct,
3154                                  * move us into the next dword. */
3155                                 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
3156                                         break;
3157                                 /* fall through */
3158                                 pull_size = min((unsigned int)4, skb->data_len);
3159                                 if (!__pskb_pull_tail(skb, pull_size)) {
3160                                         e_err(drv, "__pskb_pull_tail "
3161                                               "failed.\n");
3162                                         dev_kfree_skb_any(skb);
3163                                         return NETDEV_TX_OK;
3164                                 }
3165                                 len = skb_headlen(skb);
3166                                 break;
3167                         default:
3168                                 /* do nothing */
3169                                 break;
3170                         }
3171                 }
3172         }
3173
3174         /* reserve a descriptor for the offload context */
3175         if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3176                 count++;
3177         count++;
3178
3179         /* Controller Erratum workaround */
3180         if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3181                 count++;
3182
3183         count += TXD_USE_COUNT(len, max_txd_pwr);
3184
3185         if (adapter->pcix_82544)
3186                 count++;
3187
3188         /* Workaround for errata 10, which applies to all controllers
3189          * in PCI-X mode, so add one more descriptor to the count.
3190          */
3191         if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3192                         (len > 2015)))
3193                 count++;
3194
3195         nr_frags = skb_shinfo(skb)->nr_frags;
3196         for (f = 0; f < nr_frags; f++)
3197                 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
3198                                        max_txd_pwr);
3199         if (adapter->pcix_82544)
3200                 count += nr_frags;
3201
3202         /* need: count + 2 desc gap to keep tail from touching
3203          * head, otherwise try next time */
3204         if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3205                 return NETDEV_TX_BUSY;
3206
3207         if (unlikely(hw->mac_type == e1000_82547)) {
3208                 if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
3209                         netif_stop_queue(netdev);
3210                         if (!test_bit(__E1000_DOWN, &adapter->flags))
3211                                 mod_timer(&adapter->tx_fifo_stall_timer,
3212                                           jiffies + 1);
3213                         return NETDEV_TX_BUSY;
3214                 }
3215         }
3216
3217         if (vlan_tx_tag_present(skb)) {
3218                 tx_flags |= E1000_TX_FLAGS_VLAN;
3219                 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3220         }
3221
3222         first = tx_ring->next_to_use;
3223
3224         tso = e1000_tso(adapter, tx_ring, skb);
3225         if (tso < 0) {
3226                 dev_kfree_skb_any(skb);
3227                 return NETDEV_TX_OK;
3228         }
3229
3230         if (likely(tso)) {
3231                 if (likely(hw->mac_type != e1000_82544))
3232                         tx_ring->last_tx_tso = 1;
3233                 tx_flags |= E1000_TX_FLAGS_TSO;
3234         } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3235                 tx_flags |= E1000_TX_FLAGS_CSUM;
3236
3237         if (likely(skb->protocol == htons(ETH_P_IP)))
3238                 tx_flags |= E1000_TX_FLAGS_IPV4;
3239
3240         count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3241                              nr_frags, mss);
3242
3243         if (count) {
3244                 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3245                 /* Make sure there is space in the ring for the next send. */
3246                 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3247
3248         } else {
3249                 dev_kfree_skb_any(skb);
3250                 tx_ring->buffer_info[first].time_stamp = 0;
3251                 tx_ring->next_to_use = first;
3252         }
3253
3254         return NETDEV_TX_OK;
3255 }
3256
3257 /**
3258  * e1000_tx_timeout - Respond to a Tx Hang
3259  * @netdev: network interface device structure
3260  **/
3261
3262 static void e1000_tx_timeout(struct net_device *netdev)
3263 {
3264         struct e1000_adapter *adapter = netdev_priv(netdev);
3265
3266         /* Do the reset outside of interrupt context */
3267         adapter->tx_timeout_count++;
3268         schedule_work(&adapter->reset_task);
3269 }
3270
3271 static void e1000_reset_task(struct work_struct *work)
3272 {
3273         struct e1000_adapter *adapter =
3274                 container_of(work, struct e1000_adapter, reset_task);
3275
3276         e1000_reinit_safe(adapter);
3277 }
3278
3279 /**
3280  * e1000_get_stats - Get System Network Statistics
3281  * @netdev: network interface device structure
3282  *
3283  * Returns the address of the device statistics structure.
3284  * The statistics are actually updated from the timer callback.
3285  **/
3286
3287 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3288 {
3289         /* only return the current stats */
3290         return &netdev->stats;
3291 }
3292
3293 /**
3294  * e1000_change_mtu - Change the Maximum Transfer Unit
3295  * @netdev: network interface device structure
3296  * @new_mtu: new value for maximum frame size
3297  *
3298  * Returns 0 on success, negative on failure
3299  **/
3300
3301 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3302 {
3303         struct e1000_adapter *adapter = netdev_priv(netdev);
3304         struct e1000_hw *hw = &adapter->hw;
3305         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3306
3307         if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3308             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3309                 e_err(probe, "Invalid MTU setting\n");
3310                 return -EINVAL;
3311         }
3312
3313         /* Adapter-specific max frame size limits. */
3314         switch (hw->mac_type) {
3315         case e1000_undefined ... e1000_82542_rev2_1:
3316                 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3317                         e_err(probe, "Jumbo Frames not supported.\n");
3318                         return -EINVAL;
3319                 }
3320                 break;
3321         default:
3322                 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3323                 break;
3324         }
3325
3326         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3327                 msleep(1);
3328         /* e1000_down has a dependency on max_frame_size */
3329         hw->max_frame_size = max_frame;
3330         if (netif_running(netdev))
3331                 e1000_down(adapter);
3332
3333         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3334          * means we reserve 2 more; this pushes us to allocate from the next
3335          * larger slab size,
3336          * i.e. RXBUFFER_2048 --> size-4096 slab.
3337          * However, with the new *_jumbo_rx* routines, jumbo receives will use
3338          * fragmented skbs. */
3339
3340         if (max_frame <= E1000_RXBUFFER_2048)
3341                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3342         else
3343 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3344                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3345 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3346                 adapter->rx_buffer_len = PAGE_SIZE;
3347 #endif
3348
3349         /* adjust allocation if LPE protects us, and we aren't using SBP */
3350         if (!hw->tbi_compatibility_on &&
3351             ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3352              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3353                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3354
3355         pr_info("%s changing MTU from %d to %d\n",
3356                 netdev->name, netdev->mtu, new_mtu);
3357         netdev->mtu = new_mtu;
3358
3359         if (netif_running(netdev))
3360                 e1000_up(adapter);
3361         else
3362                 e1000_reset(adapter);
3363
3364         clear_bit(__E1000_RESETTING, &adapter->flags);
3365
3366         return 0;
3367 }
3368
3369 /**
3370  * e1000_update_stats - Update the board statistics counters
3371  * @adapter: board private structure
3372  **/
3373
3374 void e1000_update_stats(struct e1000_adapter *adapter)
3375 {
3376         struct net_device *netdev = adapter->netdev;
3377         struct e1000_hw *hw = &adapter->hw;
3378         struct pci_dev *pdev = adapter->pdev;
3379         unsigned long flags;
3380         u16 phy_tmp;
3381
3382 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
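/* The low byte of PHY_1000T_STATUS is the idle error counter that is
 * accumulated below into adapter->phy_stats.idle_errors. */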
3383
3384         /*
3385          * Prevent stats update while adapter is being reset, or if the pci
3386          * connection is down.
3387          */
3388         if (adapter->link_speed == 0)
3389                 return;
3390         if (pci_channel_offline(pdev))
3391                 return;
3392
3393         spin_lock_irqsave(&adapter->stats_lock, flags);
3394
3395         /* these counters are modified from e1000_tbi_adjust_stats,
3396          * called from the interrupt context, so they must only
3397          * be written while holding adapter->stats_lock
3398          */
3399
3400         adapter->stats.crcerrs += er32(CRCERRS);
3401         adapter->stats.gprc += er32(GPRC);
3402         adapter->stats.gorcl += er32(GORCL);
3403         adapter->stats.gorch += er32(GORCH);
3404         adapter->stats.bprc += er32(BPRC);
3405         adapter->stats.mprc += er32(MPRC);
3406         adapter->stats.roc += er32(ROC);
3407
3408         adapter->stats.prc64 += er32(PRC64);
3409         adapter->stats.prc127 += er32(PRC127);
3410         adapter->stats.prc255 += er32(PRC255);
3411         adapter->stats.prc511 += er32(PRC511);
3412         adapter->stats.prc1023 += er32(PRC1023);
3413         adapter->stats.prc1522 += er32(PRC1522);
3414
3415         adapter->stats.symerrs += er32(SYMERRS);
3416         adapter->stats.mpc += er32(MPC);
3417         adapter->stats.scc += er32(SCC);
3418         adapter->stats.ecol += er32(ECOL);
3419         adapter->stats.mcc += er32(MCC);
3420         adapter->stats.latecol += er32(LATECOL);
3421         adapter->stats.dc += er32(DC);
3422         adapter->stats.sec += er32(SEC);
3423         adapter->stats.rlec += er32(RLEC);
3424         adapter->stats.xonrxc += er32(XONRXC);
3425         adapter->stats.xontxc += er32(XONTXC);
3426         adapter->stats.xoffrxc += er32(XOFFRXC);
3427         adapter->stats.xofftxc += er32(XOFFTXC);
3428         adapter->stats.fcruc += er32(FCRUC);
3429         adapter->stats.gptc += er32(GPTC);
3430         adapter->stats.gotcl += er32(GOTCL);
3431         adapter->stats.gotch += er32(GOTCH);
3432         adapter->stats.rnbc += er32(RNBC);
3433         adapter->stats.ruc += er32(RUC);
3434         adapter->stats.rfc += er32(RFC);
3435         adapter->stats.rjc += er32(RJC);
3436         adapter->stats.torl += er32(TORL);
3437         adapter->stats.torh += er32(TORH);
3438         adapter->stats.totl += er32(TOTL);
3439         adapter->stats.toth += er32(TOTH);
3440         adapter->stats.tpr += er32(TPR);
3441
3442         adapter->stats.ptc64 += er32(PTC64);
3443         adapter->stats.ptc127 += er32(PTC127);
3444         adapter->stats.ptc255 += er32(PTC255);
3445         adapter->stats.ptc511 += er32(PTC511);
3446         adapter->stats.ptc1023 += er32(PTC1023);
3447         adapter->stats.ptc1522 += er32(PTC1522);
3448
3449         adapter->stats.mptc += er32(MPTC);
3450         adapter->stats.bptc += er32(BPTC);
3451
3452         /* used for adaptive IFS */
3453
3454         hw->tx_packet_delta = er32(TPT);
3455         adapter->stats.tpt += hw->tx_packet_delta;
3456         hw->collision_delta = er32(COLC);
3457         adapter->stats.colc += hw->collision_delta;
3458
3459         if (hw->mac_type >= e1000_82543) {
3460                 adapter->stats.algnerrc += er32(ALGNERRC);
3461                 adapter->stats.rxerrc += er32(RXERRC);
3462                 adapter->stats.tncrs += er32(TNCRS);
3463                 adapter->stats.cexterr += er32(CEXTERR);
3464                 adapter->stats.tsctc += er32(TSCTC);
3465                 adapter->stats.tsctfc += er32(TSCTFC);
3466         }
3467
3468         /* Fill out the OS statistics structure */
3469         netdev->stats.multicast = adapter->stats.mprc;
3470         netdev->stats.collisions = adapter->stats.colc;
3471
3472         /* Rx Errors */
3473
3474         /* RLEC on some newer hardware can be incorrect so build
3475         * our own version based on RUC and ROC */
3476         netdev->stats.rx_errors = adapter->stats.rxerrc +
3477                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3478                 adapter->stats.ruc + adapter->stats.roc +
3479                 adapter->stats.cexterr;
3480         adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3481         netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3482         netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3483         netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3484         netdev->stats.rx_missed_errors = adapter->stats.mpc;
3485
3486         /* Tx Errors */
3487         adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3488         netdev->stats.tx_errors = adapter->stats.txerrc;
3489         netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3490         netdev->stats.tx_window_errors = adapter->stats.latecol;
3491         netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3492         if (hw->bad_tx_carr_stats_fd &&
3493             adapter->link_duplex == FULL_DUPLEX) {
3494                 netdev->stats.tx_carrier_errors = 0;
3495                 adapter->stats.tncrs = 0;
3496         }
3497
3498         /* Tx Dropped needs to be maintained elsewhere */
3499
3500         /* Phy Stats */
3501         if (hw->media_type == e1000_media_type_copper) {
3502                 if ((adapter->link_speed == SPEED_1000) &&
3503                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3504                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3505                         adapter->phy_stats.idle_errors += phy_tmp;
3506                 }
3507
3508                 if ((hw->mac_type <= e1000_82546) &&
3509                    (hw->phy_type == e1000_phy_m88) &&
3510                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3511                         adapter->phy_stats.receive_errors += phy_tmp;
3512         }
3513
3514         /* Management Stats */
3515         if (hw->has_smbus) {
3516                 adapter->stats.mgptc += er32(MGTPTC);
3517                 adapter->stats.mgprc += er32(MGTPRC);
3518                 adapter->stats.mgpdc += er32(MGTPDC);
3519         }
3520
3521         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3522 }
3523
3524 /**
3525  * e1000_intr - Interrupt Handler
3526  * @irq: interrupt number
3527  * @data: pointer to a network interface device structure
3528  **/
3529
3530 static irqreturn_t e1000_intr(int irq, void *data)
3531 {
3532         struct net_device *netdev = data;
3533         struct e1000_adapter *adapter = netdev_priv(netdev);
3534         struct e1000_hw *hw = &adapter->hw;
3535         u32 icr = er32(ICR);
3536
3537         if (unlikely((!icr)))
3538                 return IRQ_NONE;  /* Not our interrupt */
3539
3540         /*
3541          * We might have caused the interrupt, but the above
3542          * read cleared it, and if the driver happens to be
3543          * down there is nothing to do, so return handled.
3544          */
3545         if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3546                 return IRQ_HANDLED;
3547
3548         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3549                 hw->get_link_status = 1;
3550                 /* guard against interrupt when we're going down */
3551                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3552                         mod_timer(&adapter->watchdog_timer, jiffies + 1);
3553         }
3554
3555         /* disable interrupts, without the synchronize_irq bit */
3556         ew32(IMC, ~0);
3557         E1000_WRITE_FLUSH();
3558
3559         if (likely(napi_schedule_prep(&adapter->napi))) {
3560                 adapter->total_tx_bytes = 0;
3561                 adapter->total_tx_packets = 0;
3562                 adapter->total_rx_bytes = 0;
3563                 adapter->total_rx_packets = 0;
3564                 __napi_schedule(&adapter->napi);
3565         } else {
3566                 /* This really should not happen!  If it does, it is basically
3567                  * a bug, but not a hard error, so enable interrupts and continue. */
3568                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3569                         e1000_irq_enable(adapter);
3570         }
3571
3572         return IRQ_HANDLED;
3573 }
3574
3575 /**
3576  * e1000_clean - NAPI Rx polling callback
3577  * @napi: napi struct embedded in the board private structure
3578  **/
3579 static int e1000_clean(struct napi_struct *napi, int budget)
3580 {
3581         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
3582         int tx_clean_complete = 0, work_done = 0;
3583
3584         tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3585
3586         adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3587
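        /* If Tx cleanup did not finish, report the full budget as consumed so
         * that NAPI keeps polling instead of re-enabling interrupts. */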
3588         if (!tx_clean_complete)
3589                 work_done = budget;
3590
3591         /* If budget not fully consumed, exit the polling mode */
3592         if (work_done < budget) {
3593                 if (likely(adapter->itr_setting & 3))
3594                         e1000_set_itr(adapter);
3595                 napi_complete(napi);
3596                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3597                         e1000_irq_enable(adapter);
3598         }
3599
3600         return work_done;
3601 }
3602
3603 /**
3604  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3605  * @adapter: board private structure
3606  **/
3607 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3608                                struct e1000_tx_ring *tx_ring)
3609 {
3610         struct e1000_hw *hw = &adapter->hw;
3611         struct net_device *netdev = adapter->netdev;
3612         struct e1000_tx_desc *tx_desc, *eop_desc;
3613         struct e1000_buffer *buffer_info;
3614         unsigned int i, eop;
3615         unsigned int count = 0;
3616         unsigned int total_tx_bytes=0, total_tx_packets=0;
3617
3618         i = tx_ring->next_to_clean;
3619         eop = tx_ring->buffer_info[i].next_to_watch;
3620         eop_desc = E1000_TX_DESC(*tx_ring, eop);
3621
3622         while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3623                (count < tx_ring->count)) {
3624                 bool cleaned = false;
3625                 rmb();  /* read buffer_info after eop_desc */
3626                 for ( ; !cleaned; count++) {
3627                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3628                         buffer_info = &tx_ring->buffer_info[i];
3629                         cleaned = (i == eop);
3630
3631                         if (cleaned) {
3632                                 total_tx_packets += buffer_info->segs;
3633                                 total_tx_bytes += buffer_info->bytecount;
3634                         }
3635                         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3636                         tx_desc->upper.data = 0;
3637
3638                         if (unlikely(++i == tx_ring->count)) i = 0;
3639                 }
3640
3641                 eop = tx_ring->buffer_info[i].next_to_watch;
3642                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3643         }
3644
3645         tx_ring->next_to_clean = i;
3646
3647 #define TX_WAKE_THRESHOLD 32
3648         if (unlikely(count && netif_carrier_ok(netdev) &&
3649                      E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3650                 /* Make sure that anybody stopping the queue after this
3651                  * sees the new next_to_clean.
3652                  */
3653                 smp_mb();
3654
3655                 if (netif_queue_stopped(netdev) &&
3656                     !(test_bit(__E1000_DOWN, &adapter->flags))) {
3657                         netif_wake_queue(netdev);
3658                         ++adapter->restart_queue;
3659                 }
3660         }
3661
3662         if (adapter->detect_tx_hung) {
3663                 /* Detect a transmit hang in hardware; this serializes the
3664                  * check with the clearing of time_stamp and movement of i. */
3665                 adapter->detect_tx_hung = false;
3666                 if (tx_ring->buffer_info[eop].time_stamp &&
3667                     time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3668                                (adapter->tx_timeout_factor * HZ)) &&
3669                     !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3670
3671                         /* detected Tx unit hang */
3672                         e_err(drv, "Detected Tx Unit Hang\n"
3673                               "  Tx Queue             <%lu>\n"
3674                               "  TDH                  <%x>\n"
3675                               "  TDT                  <%x>\n"
3676                               "  next_to_use          <%x>\n"
3677                               "  next_to_clean        <%x>\n"
3678                               "buffer_info[next_to_clean]\n"
3679                               "  time_stamp           <%lx>\n"
3680                               "  next_to_watch        <%x>\n"
3681                               "  jiffies              <%lx>\n"
3682                               "  next_to_watch.status <%x>\n",
3683                                 (unsigned long)((tx_ring - adapter->tx_ring) /
3684                                         sizeof(struct e1000_tx_ring)),
3685                                 readl(hw->hw_addr + tx_ring->tdh),
3686                                 readl(hw->hw_addr + tx_ring->tdt),
3687                                 tx_ring->next_to_use,
3688                                 tx_ring->next_to_clean,
3689                                 tx_ring->buffer_info[eop].time_stamp,
3690                                 eop,
3691                                 jiffies,
3692                                 eop_desc->upper.fields.status);
3693                         netif_stop_queue(netdev);
3694                 }
3695         }
3696         adapter->total_tx_bytes += total_tx_bytes;
3697         adapter->total_tx_packets += total_tx_packets;
3698         netdev->stats.tx_bytes += total_tx_bytes;
3699         netdev->stats.tx_packets += total_tx_packets;
3700         return count < tx_ring->count;
3701 }
3702
3703 /**
3704  * e1000_rx_checksum - Receive Checksum Offload for 82543
3705  * @adapter:     board private structure
3706  * @status_err:  receive descriptor status and error fields
3707  * @csum:        receive descriptor csum field
3708  * @sk_buff:     socket buffer with received data
3709  **/
3710
3711 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3712                               u32 csum, struct sk_buff *skb)
3713 {
3714         struct e1000_hw *hw = &adapter->hw;
3715         u16 status = (u16)status_err;
3716         u8 errors = (u8)(status_err >> 24);
3717
3718         skb_checksum_none_assert(skb);
3719
3720         /* 82543 or newer only */
3721         if (unlikely(hw->mac_type < e1000_82543)) return;
3722         /* Ignore Checksum bit is set */
3723         if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
3724         /* TCP/UDP checksum error bit is set */
3725         if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3726                 /* let the stack verify checksum errors */
3727                 adapter->hw_csum_err++;
3728                 return;
3729         }
3730         /* TCP/UDP Checksum has not been calculated */
3731         if (!(status & E1000_RXD_STAT_TCPCS))
3732                 return;
3733
3734         /* It must be a TCP or UDP packet with a valid checksum */
3735         if (likely(status & E1000_RXD_STAT_TCPCS)) {
3736                 /* TCP checksum is good */
3737                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3738         }
3739         adapter->hw_csum_good++;
3740 }
3741
3742 /**
3743  * e1000_consume_page - helper function to account page-based receive data to an skb
3744  **/
3745 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3746                                u16 length)
3747 {
3748         bi->page = NULL;
3749         skb->len += length;
3750         skb->data_len += length;
3751         skb->truesize += length;
3752 }
3753
3754 /**
3755  * e1000_receive_skb - helper function to handle rx indications
3756  * @adapter: board private structure
3757  * @status: descriptor status field as written by hardware
3758  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3759  * @skb: pointer to sk_buff to be indicated to stack
3760  */
3761 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
3762                               __le16 vlan, struct sk_buff *skb)
3763 {
3764         skb->protocol = eth_type_trans(skb, adapter->netdev);
3765
3766         if (status & E1000_RXD_STAT_VP) {
3767                 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
3768
3769                 __vlan_hwaccel_put_tag(skb, vid);
3770         }
3771         napi_gro_receive(&adapter->napi, skb);
3772 }
3773
3774 /**
3775  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
3776  * @adapter: board private structure
3777  * @rx_ring: ring to clean
3778  * @work_done: amount of napi work completed this call
3779  * @work_to_do: max amount of work allowed for this call to do
3780  *
3781  * the return value indicates whether actual cleaning was done, there
3782  * is no guarantee that everything was cleaned
3783  */
3784 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3785                                      struct e1000_rx_ring *rx_ring,
3786                                      int *work_done, int work_to_do)
3787 {
3788         struct e1000_hw *hw = &adapter->hw;
3789         struct net_device *netdev = adapter->netdev;
3790         struct pci_dev *pdev = adapter->pdev;
3791         struct e1000_rx_desc *rx_desc, *next_rxd;
3792         struct e1000_buffer *buffer_info, *next_buffer;
3793         unsigned long irq_flags;
3794         u32 length;
3795         unsigned int i;
3796         int cleaned_count = 0;
3797         bool cleaned = false;
3798         unsigned int total_rx_bytes=0, total_rx_packets=0;
3799
3800         i = rx_ring->next_to_clean;
3801         rx_desc = E1000_RX_DESC(*rx_ring, i);
3802         buffer_info = &rx_ring->buffer_info[i];
3803
3804         while (rx_desc->status & E1000_RXD_STAT_DD) {
3805                 struct sk_buff *skb;
3806                 u8 status;
3807
3808                 if (*work_done >= work_to_do)
3809                         break;
3810                 (*work_done)++;
3811                 rmb(); /* read descriptor and rx_buffer_info after status DD */
3812
3813                 status = rx_desc->status;
3814                 skb = buffer_info->skb;
3815                 buffer_info->skb = NULL;
3816
3817                 if (++i == rx_ring->count) i = 0;
3818                 next_rxd = E1000_RX_DESC(*rx_ring, i);
3819                 prefetch(next_rxd);
3820
3821                 next_buffer = &rx_ring->buffer_info[i];
3822
3823                 cleaned = true;
3824                 cleaned_count++;
3825                 dma_unmap_page(&pdev->dev, buffer_info->dma,
3826                                buffer_info->length, DMA_FROM_DEVICE);
3827                 buffer_info->dma = 0;
3828
3829                 length = le16_to_cpu(rx_desc->length);
3830
3831                 /* errors is only valid for DD + EOP descriptors */
3832                 if (unlikely((status & E1000_RXD_STAT_EOP) &&
3833                     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
3834                         u8 last_byte = *(skb->data + length - 1);
3835                         if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
3836                                        last_byte)) {
3837                                 spin_lock_irqsave(&adapter->stats_lock,
3838                                                   irq_flags);
3839                                 e1000_tbi_adjust_stats(hw, &adapter->stats,
3840                                                        length, skb->data);
3841                                 spin_unlock_irqrestore(&adapter->stats_lock,
3842                                                        irq_flags);
3843                                 length--;
3844                         } else {
3845                                 /* recycle both page and skb */
3846                                 buffer_info->skb = skb;
3847                                 /* an error means any chain goes out the window
3848                                  * too */
3849                                 if (rx_ring->rx_skb_top)
3850                                         dev_kfree_skb(rx_ring->rx_skb_top);
3851                                 rx_ring->rx_skb_top = NULL;
3852                                 goto next_desc;
3853                         }
3854                 }
3855
3856 #define rxtop rx_ring->rx_skb_top
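                /* rxtop points at the skb for a packet that spans multiple
                 * descriptors and is still being assembled; it lives in the
                 * ring structure, so it persists across calls. */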
3857                 if (!(status & E1000_RXD_STAT_EOP)) {
3858                         /* this descriptor is only the beginning (or middle) */
3859                         if (!rxtop) {
3860                                 /* this is the beginning of a chain */
3861                                 rxtop = skb;
3862                                 skb_fill_page_desc(rxtop, 0, buffer_info->page,
3863                                                    0, length);
3864                         } else {
3865                                 /* this is the middle of a chain */
3866                                 skb_fill_page_desc(rxtop,
3867                                     skb_shinfo(rxtop)->nr_frags,
3868                                     buffer_info->page, 0, length);
3869                                 /* re-use the skb, only consumed the page */
3870                                 buffer_info->skb = skb;
3871                         }
3872                         e1000_consume_page(buffer_info, rxtop, length);
3873                         goto next_desc;
3874                 } else {
3875                         if (rxtop) {
3876                                 /* end of the chain */
3877                                 skb_fill_page_desc(rxtop,
3878                                     skb_shinfo(rxtop)->nr_frags,
3879                                     buffer_info->page, 0, length);
3880                                 /* re-use the current skb, we only consumed the
3881                                  * page */
3882                                 buffer_info->skb = skb;
3883                                 skb = rxtop;
3884                                 rxtop = NULL;
3885                                 e1000_consume_page(buffer_info, skb, length);
3886                         } else {
3887                                 /* no chain, got EOP, this buf is the packet
3888                                  * copybreak to save the put_page/alloc_page */
3889                                 if (length <= copybreak &&
3890                                     skb_tailroom(skb) >= length) {
3891                                         u8 *vaddr;
3892                                         vaddr = kmap_atomic(buffer_info->page,
3893                                                             KM_SKB_DATA_SOFTIRQ);
3894                                         memcpy(skb_tail_pointer(skb), vaddr, length);
3895                                         kunmap_atomic(vaddr,
3896                                                       KM_SKB_DATA_SOFTIRQ);
3897                                         /* re-use the page, so don't erase
3898                                          * buffer_info->page */
3899                                         skb_put(skb, length);
3900                                 } else {
3901                                         skb_fill_page_desc(skb, 0,
3902                                                            buffer_info->page, 0,
3903                                                            length);
3904                                         e1000_consume_page(buffer_info, skb,
3905                                                            length);
3906                                 }
3907                         }
3908                 }
3909
3910                 /* Receive Checksum Offload XXX recompute due to CRC strip? */
3911                 e1000_rx_checksum(adapter,
3912                                   (u32)(status) |
3913                                   ((u32)(rx_desc->errors) << 24),
3914                                   le16_to_cpu(rx_desc->csum), skb);
3915
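                /* strip the 4-byte Ethernet CRC (FCS) still present at the
                 * end of the assembled frame */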
3916                 pskb_trim(skb, skb->len - 4);
3917
3918                 /* probably a little skewed due to removing CRC */
3919                 total_rx_bytes += skb->len;
3920                 total_rx_packets++;
3921
3922                 /* eth type trans needs skb->data to point to something */
3923                 if (!pskb_may_pull(skb, ETH_HLEN)) {
3924                         e_err(drv, "pskb_may_pull failed.\n");
3925                         dev_kfree_skb(skb);
3926                         goto next_desc;
3927                 }
3928
3929                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
3930
3931 next_desc:
3932                 rx_desc->status = 0;
3933
3934                 /* return some buffers to hardware, one at a time is too slow */
3935                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
3936                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3937                         cleaned_count = 0;
3938                 }
3939
3940                 /* use prefetched values */
3941                 rx_desc = next_rxd;
3942                 buffer_info = next_buffer;
3943         }
3944         rx_ring->next_to_clean = i;
3945
3946         cleaned_count = E1000_DESC_UNUSED(rx_ring);
3947         if (cleaned_count)
3948                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
3949
3950         adapter->total_rx_packets += total_rx_packets;
3951         adapter->total_rx_bytes += total_rx_bytes;
3952         netdev->stats.rx_bytes += total_rx_bytes;
3953         netdev->stats.rx_packets += total_rx_packets;
3954         return cleaned;
3955 }
3956
3957 /*
3958  * Copy small packets into a fresh skb and recycle the original buffer; this
3959  * should improve performance when a lot of reassembly is done in the stack.
3960  */
3961 static void e1000_check_copybreak(struct net_device *netdev,
3962                                  struct e1000_buffer *buffer_info,
3963                                  u32 length, struct sk_buff **skb)
3964 {
3965         struct sk_buff *new_skb;
3966
3967         if (length > copybreak)
3968                 return;
3969
3970         new_skb = netdev_alloc_skb_ip_align(netdev, length);
3971         if (!new_skb)
3972                 return;
3973
3974         skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
3975                                        (*skb)->data - NET_IP_ALIGN,
3976                                        length + NET_IP_ALIGN);
3977         /* save the skb in buffer_info as good */
3978         buffer_info->skb = *skb;
3979         *skb = new_skb;
3980 }
3981
3982 /**
3983  * e1000_clean_rx_irq - Send received data up the network stack; legacy
3984  * @adapter: board private structure
3985  * @rx_ring: ring to clean
3986  * @work_done: amount of napi work completed this call
3987  * @work_to_do: max amount of work allowed for this call to do
3988  */
3989 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3990                                struct e1000_rx_ring *rx_ring,
3991                                int *work_done, int work_to_do)
3992 {
3993         struct e1000_hw *hw = &adapter->hw;
3994         struct net_device *netdev = adapter->netdev;
3995         struct pci_dev *pdev = adapter->pdev;
3996         struct e1000_rx_desc *rx_desc, *next_rxd;
3997         struct e1000_buffer *buffer_info, *next_buffer;
3998         unsigned long flags;
3999         u32 length;
4000         unsigned int i;
4001         int cleaned_count = 0;
4002         bool cleaned = false;
4003         unsigned int total_rx_bytes=0, total_rx_packets=0;
4004
4005         i = rx_ring->next_to_clean;
4006         rx_desc = E1000_RX_DESC(*rx_ring, i);
4007         buffer_info = &rx_ring->buffer_info[i];
4008
4009         while (rx_desc->status & E1000_RXD_STAT_DD) {
4010                 struct sk_buff *skb;
4011                 u8 status;
4012
4013                 if (*work_done >= work_to_do)
4014                         break;
4015                 (*work_done)++;
4016                 rmb(); /* read descriptor and rx_buffer_info after status DD */
4017
4018                 status = rx_desc->status;
4019                 skb = buffer_info->skb;
4020                 buffer_info->skb = NULL;
4021
4022                 prefetch(skb->data - NET_IP_ALIGN);
4023
4024                 if (++i == rx_ring->count) i = 0;
4025                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4026                 prefetch(next_rxd);
4027
4028                 next_buffer = &rx_ring->buffer_info[i];
4029
4030                 cleaned = true;
4031                 cleaned_count++;
4032                 dma_unmap_single(&pdev->dev, buffer_info->dma,
4033                                  buffer_info->length, DMA_FROM_DEVICE);
4034                 buffer_info->dma = 0;
4035
4036                 length = le16_to_cpu(rx_desc->length);
4037                 /* !EOP means multiple descriptors were used to store a single
4038                  * packet; if that's the case we need to toss it.  In fact, we
4039                  * need to toss every packet with the EOP bit clear and the next
4040                  * frame that _does_ have the EOP bit set, as it is by
4041                  * definition only a frame fragment.
4042                  */
4043                 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4044                         adapter->discarding = true;
4045
4046                 if (adapter->discarding) {
4047                         /* All receives must fit into a single buffer */
4048                         e_dbg("Receive packet consumed multiple buffers\n");
4049                         /* recycle */
4050                         buffer_info->skb = skb;
4051                         if (status & E1000_RXD_STAT_EOP)
4052                                 adapter->discarding = false;
4053                         goto next_desc;
4054                 }
4055
4056                 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4057                         u8 last_byte = *(skb->data + length - 1);
4058                         if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4059                                        last_byte)) {
4060                                 spin_lock_irqsave(&adapter->stats_lock, flags);
4061                                 e1000_tbi_adjust_stats(hw, &adapter->stats,
4062                                                        length, skb->data);
4063                                 spin_unlock_irqrestore(&adapter->stats_lock,
4064                                                        flags);
4065                                 length--;
4066                         } else {
4067                                 /* recycle */
4068                                 buffer_info->skb = skb;
4069                                 goto next_desc;
4070                         }
4071                 }
4072
4073                 /* adjust length to remove Ethernet CRC; this must be
4074                  * done after the TBI_ACCEPT workaround above */
4075                 length -= 4;
4076
4077                 /* probably a little skewed due to removing CRC */
4078                 total_rx_bytes += length;
4079                 total_rx_packets++;
4080
4081                 e1000_check_copybreak(netdev, buffer_info, length, &skb);
4082
4083                 skb_put(skb, length);
4084
4085                 /* Receive Checksum Offload */
4086                 e1000_rx_checksum(adapter,
4087                                   (u32)(status) |
4088                                   ((u32)(rx_desc->errors) << 24),
4089                                   le16_to_cpu(rx_desc->csum), skb);
4090
4091                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4092
4093 next_desc:
4094                 rx_desc->status = 0;
4095
4096                 /* return some buffers to hardware, one at a time is too slow */
4097                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4098                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4099                         cleaned_count = 0;
4100                 }
4101
4102                 /* use prefetched values */
4103                 rx_desc = next_rxd;
4104                 buffer_info = next_buffer;
4105         }
4106         rx_ring->next_to_clean = i;
4107
4108         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4109         if (cleaned_count)
4110                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4111
4112         adapter->total_rx_packets += total_rx_packets;
4113         adapter->total_rx_bytes += total_rx_bytes;
4114         netdev->stats.rx_bytes += total_rx_bytes;
4115         netdev->stats.rx_packets += total_rx_packets;
4116         return cleaned;
4117 }
4118
4119 /**
4120  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4121  * @adapter: address of board private structure
4122  * @rx_ring: pointer to receive ring structure
4123  * @cleaned_count: number of buffers to allocate this pass
4124  **/
4125
4126 static void
4127 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4128                              struct e1000_rx_ring *rx_ring, int cleaned_count)
4129 {
4130         struct net_device *netdev = adapter->netdev;
4131         struct pci_dev *pdev = adapter->pdev;
4132         struct e1000_rx_desc *rx_desc;
4133         struct e1000_buffer *buffer_info;
4134         struct sk_buff *skb;
4135         unsigned int i;
4136         unsigned int bufsz = 256 - 16; /* for skb_reserve */
4137
4138         i = rx_ring->next_to_use;
4139         buffer_info = &rx_ring->buffer_info[i];
4140
4141         while (cleaned_count--) {
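                     /* reuse any skb the cleanup path recycled via
                      * buffer_info->skb rather than allocating a fresh one
                      */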
4142                 skb = buffer_info->skb;
4143                 if (skb) {
4144                         skb_trim(skb, 0);
4145                         goto check_page;
4146                 }
4147
4148                 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4149                 if (unlikely(!skb)) {
4150                         /* Better luck next round */
4151                         adapter->alloc_rx_buff_failed++;
4152                         break;
4153                 }
4154
4155                 /* Fix for errata 23, can't cross 64kB boundary */
4156                 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4157                         struct sk_buff *oldskb = skb;
4158                         e_err(rx_err, "skb align check failed: %u bytes at "
4159                               "%p\n", bufsz, skb->data);
4160                         /* Try again, without freeing the previous */
4161                         skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4162                         /* Failed allocation, critical failure */
4163                         if (!skb) {
4164                                 dev_kfree_skb(oldskb);
4165                                 adapter->alloc_rx_buff_failed++;
4166                                 break;
4167                         }
4168
4169                         if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4170                                 /* give up */
4171                                 dev_kfree_skb(skb);
4172                                 dev_kfree_skb(oldskb);
4173                                 break; /* while (cleaned_count--) */
4174                         }
4175
4176                         /* Use new allocation */
4177                         dev_kfree_skb(oldskb);
4178                 }
4179                 buffer_info->skb = skb;
4180                 buffer_info->length = adapter->rx_buffer_len;
4181 check_page:
4182                 /* allocate a new page if necessary */
4183                 if (!buffer_info->page) {
4184                         buffer_info->page = alloc_page(GFP_ATOMIC);
4185                         if (unlikely(!buffer_info->page)) {
4186                                 adapter->alloc_rx_buff_failed++;
4187                                 break;
4188                         }
4189                 }
4190
4191                 if (!buffer_info->dma) {
4192                         buffer_info->dma = dma_map_page(&pdev->dev,
4193                                                         buffer_info->page, 0,
4194                                                         buffer_info->length,
4195                                                         DMA_FROM_DEVICE);
4196                         if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4197                                 put_page(buffer_info->page);
4198                                 dev_kfree_skb(skb);
4199                                 buffer_info->page = NULL;
4200                                 buffer_info->skb = NULL;
4201                                 buffer_info->dma = 0;
4202                                 adapter->alloc_rx_buff_failed++;
4203                                 break; /* while !buffer_info->skb */
4204                         }
4205                 }
4206
4207                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4208                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4209
4210                 if (unlikely(++i == rx_ring->count))
4211                         i = 0;
4212                 buffer_info = &rx_ring->buffer_info[i];
4213         }
4214
4215         if (likely(rx_ring->next_to_use != i)) {
4216                 rx_ring->next_to_use = i;
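                     /* write RDT with the index of the last descriptor that
                      * was given a buffer (one behind next_to_use), wrapping
                      * around the ring when next_to_use is 0
                      */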
4217                 if (unlikely(i-- == 0))
4218                         i = (rx_ring->count - 1);
4219
4220                 /* Force memory writes to complete before letting h/w
4221                  * know there are new descriptors to fetch.  (Only
4222                  * applicable for weak-ordered memory model archs,
4223                  * such as IA-64). */
4224                 wmb();
4225                 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4226         }
4227 }
4228
4229 /**
4230  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4231  * @adapter: address of board private structure
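      * @rx_ring: pointer to the receive ring to refill
      * @cleaned_count: number of buffers to allocate this pass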
4232  **/
4233
4234 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4235                                    struct e1000_rx_ring *rx_ring,
4236                                    int cleaned_count)
4237 {
4238         struct e1000_hw *hw = &adapter->hw;
4239         struct net_device *netdev = adapter->netdev;
4240         struct pci_dev *pdev = adapter->pdev;
4241         struct e1000_rx_desc *rx_desc;
4242         struct e1000_buffer *buffer_info;
4243         struct sk_buff *skb;
4244         unsigned int i;
4245         unsigned int bufsz = adapter->rx_buffer_len;
4246
4247         i = rx_ring->next_to_use;
4248         buffer_info = &rx_ring->buffer_info[i];
4249
4250         while (cleaned_count--) {
4251                 skb = buffer_info->skb;
4252                 if (skb) {
4253                         skb_trim(skb, 0);
4254                         goto map_skb;
4255                 }
4256
4257                 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4258                 if (unlikely(!skb)) {
4259                         /* Better luck next round */
4260                         adapter->alloc_rx_buff_failed++;
4261                         break;
4262                 }
4263
4264                 /* Fix for errata 23, can't cross 64kB boundary */
4265                 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4266                         struct sk_buff *oldskb = skb;
4267                         e_err(rx_err, "skb align check failed: %u bytes at "
4268                               "%p\n", bufsz, skb->data);
4269                         /* Try again, without freeing the previous */
4270                         skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4271                         /* Failed allocation, critical failure */
4272                         if (!skb) {
4273                                 dev_kfree_skb(oldskb);
4274                                 adapter->alloc_rx_buff_failed++;
4275                                 break;
4276                         }
4277
4278                         if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4279                                 /* give up */
4280                                 dev_kfree_skb(skb);
4281                                 dev_kfree_skb(oldskb);
4282                                 adapter->alloc_rx_buff_failed++;
4283                                 break; /* while !buffer_info->skb */
4284                         }
4285
4286                         /* Use new allocation */
4287                         dev_kfree_skb(oldskb);
4288                 }
4289                 buffer_info->skb = skb;
4290                 buffer_info->length = adapter->rx_buffer_len;
4291 map_skb:
4292                 buffer_info->dma = dma_map_single(&pdev->dev,
4293                                                   skb->data,
4294                                                   buffer_info->length,
4295                                                   DMA_FROM_DEVICE);
4296                 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4297                         dev_kfree_skb(skb);
4298                         buffer_info->skb = NULL;
4299                         buffer_info->dma = 0;
4300                         adapter->alloc_rx_buff_failed++;
4301                         break; /* while !buffer_info->skb */
4302                 }
4303
4304                 /*
4305                  * XXX if it was allocated cleanly it will never map to a
4306                  * boundary crossing
4307                  */
4308
4309                 /* Fix for errata 23, can't cross 64kB boundary */
4310                 if (!e1000_check_64k_bound(adapter,
4311                                         (void *)(unsigned long)buffer_info->dma,
4312                                         adapter->rx_buffer_len)) {
4313                         e_err(rx_err, "dma align check failed: %u bytes at "
4314                               "%p\n", adapter->rx_buffer_len,
4315                               (void *)(unsigned long)buffer_info->dma);
4316                         dev_kfree_skb(skb);
4317                         buffer_info->skb = NULL;
4318
4319                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4320                                          adapter->rx_buffer_len,
4321                                          DMA_FROM_DEVICE);
4322                         buffer_info->dma = 0;
4323
4324                         adapter->alloc_rx_buff_failed++;
4325                         break; /* while !buffer_info->skb */
4326                 }
4327                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4328                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4329
4330                 if (unlikely(++i == rx_ring->count))
4331                         i = 0;
4332                 buffer_info = &rx_ring->buffer_info[i];
4333         }
4334
4335         if (likely(rx_ring->next_to_use != i)) {
4336                 rx_ring->next_to_use = i;
4337                 if (unlikely(i-- == 0))
4338                         i = (rx_ring->count - 1);
4339
4340                 /* Force memory writes to complete before letting h/w
4341                  * know there are new descriptors to fetch.  (Only
4342                  * applicable for weak-ordered memory model archs,
4343                  * such as IA-64). */
4344                 wmb();
4345                 writel(i, hw->hw_addr + rx_ring->rdt);
4346         }
4347 }
4348
4349 /**
4350  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4351  * @adapter: board private structure
4352  **/
4353
4354 static void e1000_smartspeed(struct e1000_adapter *adapter)
4355 {
4356         struct e1000_hw *hw = &adapter->hw;
4357         u16 phy_status;
4358         u16 phy_ctrl;
4359
4360         if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4361            !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4362                 return;
4363
4364         if (adapter->smartspeed == 0) {
4365                 /* If Master/Slave config fault is asserted twice,
4366                  * we assume back-to-back */
4367                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4368                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4369                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4370                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4371                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4372                 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4373                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
4374                         e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4375                                             phy_ctrl);
4376                         adapter->smartspeed++;
4377                         if (!e1000_phy_setup_autoneg(hw) &&
4378                            !e1000_read_phy_reg(hw, PHY_CTRL,
4379                                                &phy_ctrl)) {
4380                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4381                                              MII_CR_RESTART_AUTO_NEG);
4382                                 e1000_write_phy_reg(hw, PHY_CTRL,
4383                                                     phy_ctrl);
4384                         }
4385                 }
4386                 return;
4387         } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4388                 /* If still no link, perhaps using 2/3 pair cable */
4389                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4390                 phy_ctrl |= CR_1000T_MS_ENABLE;
4391                 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4392                 if (!e1000_phy_setup_autoneg(hw) &&
4393                    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4394                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4395                                      MII_CR_RESTART_AUTO_NEG);
4396                         e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4397                 }
4398         }
4399         /* Restart process after E1000_SMARTSPEED_MAX iterations */
4400         if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4401                 adapter->smartspeed = 0;
4402 }
4403
4404 /**
4405  * e1000_ioctl - handle ioctl calls
4406  * @netdev: pointer to the network device structure
4407  * @ifr: pointer to the interface request structure
4408  * @cmd: ioctl command to execute
4409  **/
4410
4411 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4412 {
4413         switch (cmd) {
4414         case SIOCGMIIPHY:
4415         case SIOCGMIIREG:
4416         case SIOCSMIIREG:
4417                 return e1000_mii_ioctl(netdev, ifr, cmd);
4418         default:
4419                 return -EOPNOTSUPP;
4420         }
4421 }
4422
4423 /**
4424  * e1000_mii_ioctl - handle MII register ioctl calls
4425  * @netdev: pointer to the network device structure
4426  * @ifr: pointer to the interface request structure
4427  * @cmd: one of SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG
4428  **/
4429
4430 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4431                            int cmd)
4432 {
4433         struct e1000_adapter *adapter = netdev_priv(netdev);
4434         struct e1000_hw *hw = &adapter->hw;
4435         struct mii_ioctl_data *data = if_mii(ifr);
4436         int retval;
4437         u16 mii_reg;
4438         unsigned long flags;
4439
4440         if (hw->media_type != e1000_media_type_copper)
4441                 return -EOPNOTSUPP;
4442
4443         switch (cmd) {
4444         case SIOCGMIIPHY:
4445                 data->phy_id = hw->phy_addr;
4446                 break;
4447         case SIOCGMIIREG:
4448                 spin_lock_irqsave(&adapter->stats_lock, flags);
4449                 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4450                                    &data->val_out)) {
4451                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4452                         return -EIO;
4453                 }
4454                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4455                 break;
4456         case SIOCSMIIREG:
4457                 if (data->reg_num & ~(0x1F))
4458                         return -EFAULT;
4459                 mii_reg = data->val_in;
4460                 spin_lock_irqsave(&adapter->stats_lock, flags);
4461                 if (e1000_write_phy_reg(hw, data->reg_num,
4462                                         mii_reg)) {
4463                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4464                         return -EIO;
4465                 }
4466                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4467                 if (hw->media_type == e1000_media_type_copper) {
4468                         switch (data->reg_num) {
4469                         case PHY_CTRL:
4470                                 if (mii_reg & MII_CR_POWER_DOWN)
4471                                         break;
4472                                 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4473                                         hw->autoneg = 1;
4474                                         hw->autoneg_advertised = 0x2F;
4475                                 } else {
4476                                         u32 speed;
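                                                  /* decode the forced link from
                                                   * the MII control word: bit 6
                                                   * selects 1000 Mbps, bit 13
                                                   * selects 100 Mbps, neither
                                                   * set means 10 Mbps; bit 8
                                                   * requests full duplex
                                                   */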
4477                                         if (mii_reg & 0x40)
4478                                                 speed = SPEED_1000;
4479                                         else if (mii_reg & 0x2000)
4480                                                 speed = SPEED_100;
4481                                         else
4482                                                 speed = SPEED_10;
4483                                         retval = e1000_set_spd_dplx(
4484                                                 adapter, speed,
4485                                                 ((mii_reg & 0x100)
4486                                                  ? DUPLEX_FULL :
4487                                                  DUPLEX_HALF));
4488                                         if (retval)
4489                                                 return retval;
4490                                 }
4491                                 if (netif_running(adapter->netdev))
4492                                         e1000_reinit_locked(adapter);
4493                                 else
4494                                         e1000_reset(adapter);
4495                                 break;
4496                         case M88E1000_PHY_SPEC_CTRL:
4497                         case M88E1000_EXT_PHY_SPEC_CTRL:
4498                                 if (e1000_phy_reset(hw))
4499                                         return -EIO;
4500                                 break;
4501                         }
4502                 } else {
4503                         switch (data->reg_num) {
4504                         case PHY_CTRL:
4505                                 if (mii_reg & MII_CR_POWER_DOWN)
4506                                         break;
4507                                 if (netif_running(adapter->netdev))
4508                                         e1000_reinit_locked(adapter);
4509                                 else
4510                                         e1000_reset(adapter);
4511                                 break;
4512                         }
4513                 }
4514                 break;
4515         default:
4516                 return -EOPNOTSUPP;
4517         }
4518         return E1000_SUCCESS;
4519 }
4520
4521 void e1000_pci_set_mwi(struct e1000_hw *hw)
4522 {
4523         struct e1000_adapter *adapter = hw->back;
4524         int ret_val = pci_set_mwi(adapter->pdev);
4525
4526         if (ret_val)
4527                 e_err(probe, "Error in setting MWI\n");
4528 }
4529
4530 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4531 {
4532         struct e1000_adapter *adapter = hw->back;
4533
4534         pci_clear_mwi(adapter->pdev);
4535 }
4536
4537 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4538 {
4539         struct e1000_adapter *adapter = hw->back;
4540         return pcix_get_mmrbc(adapter->pdev);
4541 }
4542
4543 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4544 {
4545         struct e1000_adapter *adapter = hw->back;
4546         pcix_set_mmrbc(adapter->pdev, mmrbc);
4547 }
4548
4549 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4550 {
4551         outl(value, port);
4552 }
4553
4554 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4555 {
4556         u16 vid;
4557
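             /* any bit set in active_vlans means at least one VLAN is registered */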
4558         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4559                 return true;
4560         return false;
4561 }
4562
4563 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4564                                      bool filter_on)
4565 {
4566         struct e1000_hw *hw = &adapter->hw;
4567         u32 rctl;
4568
4569         if (!test_bit(__E1000_DOWN, &adapter->flags))
4570                 e1000_irq_disable(adapter);
4571
4572         if (filter_on) {
4573                 /* enable VLAN receive filtering */
4574                 rctl = er32(RCTL);
4575                 rctl &= ~E1000_RCTL_CFIEN;
4576                 if (!(adapter->netdev->flags & IFF_PROMISC))
4577                         rctl |= E1000_RCTL_VFE;
4578                 ew32(RCTL, rctl);
4579                 e1000_update_mng_vlan(adapter);
4580         } else {
4581                 /* disable VLAN receive filtering */
4582                 rctl = er32(RCTL);
4583                 rctl &= ~E1000_RCTL_VFE;
4584                 ew32(RCTL, rctl);
4585         }
4586
4587         if (!test_bit(__E1000_DOWN, &adapter->flags))
4588                 e1000_irq_enable(adapter);
4589 }
4590
4591 static void e1000_vlan_mode(struct net_device *netdev, u32 features)
4592 {
4593         struct e1000_adapter *adapter = netdev_priv(netdev);
4594         struct e1000_hw *hw = &adapter->hw;
4595         u32 ctrl;
4596
4597         if (!test_bit(__E1000_DOWN, &adapter->flags))
4598                 e1000_irq_disable(adapter);
4599
4600         ctrl = er32(CTRL);
4601         if (features & NETIF_F_HW_VLAN_RX) {
4602                 /* enable VLAN tag insert/strip */
4603                 ctrl |= E1000_CTRL_VME;
4604         } else {
4605                 /* disable VLAN tag insert/strip */
4606                 ctrl &= ~E1000_CTRL_VME;
4607         }
4608         ew32(CTRL, ctrl);
4609
4610         if (!test_bit(__E1000_DOWN, &adapter->flags))
4611                 e1000_irq_enable(adapter);
4612 }
4613
4614 static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4615 {
4616         struct e1000_adapter *adapter = netdev_priv(netdev);
4617         struct e1000_hw *hw = &adapter->hw;
4618         u32 vfta, index;
4619
4620         if ((hw->mng_cookie.status &
4621              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4622             (vid == adapter->mng_vlan_id))
4623                 return;
4624
4625         if (!e1000_vlan_used(adapter))
4626                 e1000_vlan_filter_on_off(adapter, true);
4627
4628         /* add VID to filter table */
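             /* the VFTA is 128 32-bit registers: VID bits [11:5] select the
              * register and bits [4:0] select the bit within it
              */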
4629         index = (vid >> 5) & 0x7F;
4630         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4631         vfta |= (1 << (vid & 0x1F));
4632         e1000_write_vfta(hw, index, vfta);
4633
4634         set_bit(vid, adapter->active_vlans);
4635 }
4636
4637 static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4638 {
4639         struct e1000_adapter *adapter = netdev_priv(netdev);
4640         struct e1000_hw *hw = &adapter->hw;
4641         u32 vfta, index;
4642
4643         if (!test_bit(__E1000_DOWN, &adapter->flags))
4644                 e1000_irq_disable(adapter);
4645         if (!test_bit(__E1000_DOWN, &adapter->flags))
4646                 e1000_irq_enable(adapter);
4647
4648         /* remove VID from filter table */
4649         index = (vid >> 5) & 0x7F;
4650         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4651         vfta &= ~(1 << (vid & 0x1F));
4652         e1000_write_vfta(hw, index, vfta);
4653
4654         clear_bit(vid, adapter->active_vlans);
4655
4656         if (!e1000_vlan_used(adapter))
4657                 e1000_vlan_filter_on_off(adapter, false);
4658 }
4659
4660 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4661 {
4662         u16 vid;
4663
4664         if (!e1000_vlan_used(adapter))
4665                 return;
4666
4667         e1000_vlan_filter_on_off(adapter, true);
4668         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4669                 e1000_vlan_rx_add_vid(adapter->netdev, vid);
4670 }
4671
4672 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4673 {
4674         struct e1000_hw *hw = &adapter->hw;
4675
4676         hw->autoneg = 0;
4677
4678         /* Make sure dplx is at most 1 bit and lsb of speed is not set
4679          * for the switch() below to work */
4680         if ((spd & 1) || (dplx & ~1))
4681                 goto err_inval;
4682
4683         /* Fiber NICs only allow 1000 Mbps full duplex */
4684         if ((hw->media_type == e1000_media_type_fiber) &&
4685             spd != SPEED_1000 &&
4686             dplx != DUPLEX_FULL)
4687                 goto err_inval;
4688
4689         switch (spd + dplx) {
4690         case SPEED_10 + DUPLEX_HALF:
4691                 hw->forced_speed_duplex = e1000_10_half;
4692                 break;
4693         case SPEED_10 + DUPLEX_FULL:
4694                 hw->forced_speed_duplex = e1000_10_full;
4695                 break;
4696         case SPEED_100 + DUPLEX_HALF:
4697                 hw->forced_speed_duplex = e1000_100_half;
4698                 break;
4699         case SPEED_100 + DUPLEX_FULL:
4700                 hw->forced_speed_duplex = e1000_100_full;
4701                 break;
4702         case SPEED_1000 + DUPLEX_FULL:
4703                 hw->autoneg = 1;
4704                 hw->autoneg_advertised = ADVERTISE_1000_FULL;
4705                 break;
4706         case SPEED_1000 + DUPLEX_HALF: /* not supported */
4707         default:
4708                 goto err_inval;
4709         }
4710         return 0;
4711
4712 err_inval:
4713         e_err(probe, "Unsupported Speed/Duplex configuration\n");
4714         return -EINVAL;
4715 }
4716
4717 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4718 {
4719         struct net_device *netdev = pci_get_drvdata(pdev);
4720         struct e1000_adapter *adapter = netdev_priv(netdev);
4721         struct e1000_hw *hw = &adapter->hw;
4722         u32 ctrl, ctrl_ext, rctl, status;
4723         u32 wufc = adapter->wol;
4724 #ifdef CONFIG_PM
4725         int retval = 0;
4726 #endif
4727
4728         netif_device_detach(netdev);
4729
4730         if (netif_running(netdev)) {
4731                 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4732                 e1000_down(adapter);
4733         }
4734
4735 #ifdef CONFIG_PM
4736         retval = pci_save_state(pdev);
4737         if (retval)
4738                 return retval;
4739 #endif
4740
4741         status = er32(STATUS);
4742         if (status & E1000_STATUS_LU)
4743                 wufc &= ~E1000_WUFC_LNKC;
4744
4745         if (wufc) {
4746                 e1000_setup_rctl(adapter);
4747                 e1000_set_rx_mode(netdev);
4748
4749                 /* turn on all-multi mode if wake on multicast is enabled */
4750                 if (wufc & E1000_WUFC_MC) {
4751                         rctl = er32(RCTL);
4752                         rctl |= E1000_RCTL_MPE;
4753                         ew32(RCTL, rctl);
4754                 }
4755
4756                 if (hw->mac_type >= e1000_82540) {
4757                         ctrl = er32(CTRL);
4758                         /* advertise wake from D3Cold */
4759                         #define E1000_CTRL_ADVD3WUC 0x00100000
4760                         /* phy power management enable */
4761                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
4762                         ctrl |= E1000_CTRL_ADVD3WUC |
4763                                 E1000_CTRL_EN_PHY_PWR_MGMT;
4764                         ew32(CTRL, ctrl);
4765                 }
4766
4767                 if (hw->media_type == e1000_media_type_fiber ||
4768                     hw->media_type == e1000_media_type_internal_serdes) {
4769                         /* keep the laser running in D3 */
4770                         ctrl_ext = er32(CTRL_EXT);
4771                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
4772                         ew32(CTRL_EXT, ctrl_ext);
4773                 }
4774
4775                 ew32(WUC, E1000_WUC_PME_EN);
4776                 ew32(WUFC, wufc);
4777         } else {
4778                 ew32(WUC, 0);
4779                 ew32(WUFC, 0);
4780         }
4781
4782         e1000_release_manageability(adapter);
4783
4784         *enable_wake = !!wufc;
4785
4786         /* make sure adapter isn't asleep if manageability is enabled */
4787         if (adapter->en_mng_pt)
4788                 *enable_wake = true;
4789
4790         if (netif_running(netdev))
4791                 e1000_free_irq(adapter);
4792
4793         pci_disable_device(pdev);
4794
4795         return 0;
4796 }
4797
4798 #ifdef CONFIG_PM
4799 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
4800 {
4801         int retval;
4802         bool wake;
4803
4804         retval = __e1000_shutdown(pdev, &wake);
4805         if (retval)
4806                 return retval;
4807
4808         if (wake) {
4809                 pci_prepare_to_sleep(pdev);
4810         } else {
4811                 pci_wake_from_d3(pdev, false);
4812                 pci_set_power_state(pdev, PCI_D3hot);
4813         }
4814
4815         return 0;
4816 }
4817
4818 static int e1000_resume(struct pci_dev *pdev)
4819 {
4820         struct net_device *netdev = pci_get_drvdata(pdev);
4821         struct e1000_adapter *adapter = netdev_priv(netdev);
4822         struct e1000_hw *hw = &adapter->hw;
4823         u32 err;
4824
4825         pci_set_power_state(pdev, PCI_D0);
4826         pci_restore_state(pdev);
4827         pci_save_state(pdev);
4828
4829         if (adapter->need_ioport)
4830                 err = pci_enable_device(pdev);
4831         else
4832                 err = pci_enable_device_mem(pdev);
4833         if (err) {
4834                 pr_err("Cannot enable PCI device from suspend\n");
4835                 return err;
4836         }
4837         pci_set_master(pdev);
4838
4839         pci_enable_wake(pdev, PCI_D3hot, 0);
4840         pci_enable_wake(pdev, PCI_D3cold, 0);
4841
4842         if (netif_running(netdev)) {
4843                 err = e1000_request_irq(adapter);
4844                 if (err)
4845                         return err;
4846         }
4847
4848         e1000_power_up_phy(adapter);
4849         e1000_reset(adapter);
4850         ew32(WUS, ~0);
4851
4852         e1000_init_manageability(adapter);
4853
4854         if (netif_running(netdev))
4855                 e1000_up(adapter);
4856
4857         netif_device_attach(netdev);
4858
4859         return 0;
4860 }
4861 #endif
4862
4863 static void e1000_shutdown(struct pci_dev *pdev)
4864 {
4865         bool wake;
4866
4867         __e1000_shutdown(pdev, &wake);
4868
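             /* only when the system is really powering off is wake-up
              * configured and the device put into D3hot; a plain reboot
              * leaves the device untouched
              */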
4869         if (system_state == SYSTEM_POWER_OFF) {
4870                 pci_wake_from_d3(pdev, wake);
4871                 pci_set_power_state(pdev, PCI_D3hot);
4872         }
4873 }
4874
4875 #ifdef CONFIG_NET_POLL_CONTROLLER
4876 /*
4877  * Polling 'interrupt' - used by things like netconsole to send skbs
4878  * without having to re-enable interrupts. It's not called while
4879  * the interrupt routine is executing.
4880  */
4881 static void e1000_netpoll(struct net_device *netdev)
4882 {
4883         struct e1000_adapter *adapter = netdev_priv(netdev);
4884
4885         disable_irq(adapter->pdev->irq);
4886         e1000_intr(adapter->pdev->irq, netdev);
4887         enable_irq(adapter->pdev->irq);
4888 }
4889 #endif
4890
4891 /**
4892  * e1000_io_error_detected - called when PCI error is detected
4893  * @pdev: Pointer to PCI device
4894  * @state: The current pci connection state
4895  *
4896  * This function is called after a PCI bus error affecting
4897  * this device has been detected.
4898  */
4899 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
4900                                                 pci_channel_state_t state)
4901 {
4902         struct net_device *netdev = pci_get_drvdata(pdev);
4903         struct e1000_adapter *adapter = netdev_priv(netdev);
4904
4905         netif_device_detach(netdev);
4906
4907         if (state == pci_channel_io_perm_failure)
4908                 return PCI_ERS_RESULT_DISCONNECT;
4909
4910         if (netif_running(netdev))
4911                 e1000_down(adapter);
4912         pci_disable_device(pdev);
4913
4914         /* Request a slot reset. */
4915         return PCI_ERS_RESULT_NEED_RESET;
4916 }
4917
4918 /**
4919  * e1000_io_slot_reset - called after the pci bus has been reset.
4920  * @pdev: Pointer to PCI device
4921  *
4922  * Restart the card from scratch, as if from a cold-boot. Implementation
4923  * resembles the first-half of the e1000_resume routine.
4924  */
4925 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4926 {
4927         struct net_device *netdev = pci_get_drvdata(pdev);
4928         struct e1000_adapter *adapter = netdev_priv(netdev);
4929         struct e1000_hw *hw = &adapter->hw;
4930         int err;
4931
4932         if (adapter->need_ioport)
4933                 err = pci_enable_device(pdev);
4934         else
4935                 err = pci_enable_device_mem(pdev);
4936         if (err) {
4937                 pr_err("Cannot re-enable PCI device after reset.\n");
4938                 return PCI_ERS_RESULT_DISCONNECT;
4939         }
4940         pci_set_master(pdev);
4941
4942         pci_enable_wake(pdev, PCI_D3hot, 0);
4943         pci_enable_wake(pdev, PCI_D3cold, 0);
4944
4945         e1000_reset(adapter);
4946         ew32(WUS, ~0);
4947
4948         return PCI_ERS_RESULT_RECOVERED;
4949 }
4950
4951 /**
4952  * e1000_io_resume - called when traffic can start flowing again.
4953  * @pdev: Pointer to PCI device
4954  *
4955  * This callback is called when the error recovery driver tells us that
4956  * it's OK to resume normal operation. Implementation resembles the
4957  * second-half of the e1000_resume routine.
4958  */
4959 static void e1000_io_resume(struct pci_dev *pdev)
4960 {
4961         struct net_device *netdev = pci_get_drvdata(pdev);
4962         struct e1000_adapter *adapter = netdev_priv(netdev);
4963
4964         e1000_init_manageability(adapter);
4965
4966         if (netif_running(netdev)) {
4967                 if (e1000_up(adapter)) {
4968                         pr_info("can't bring device back up after reset\n");
4969                         return;
4970                 }
4971         }
4972
4973         netif_device_attach(netdev);
4974 }
4975
4976 /* e1000_main.c */